Dataset schema as reported by the viewer (column name, dtype, and observed range; "nullable" marks columns that may be null/⌀):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 – 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 – 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 – 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 – 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
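For orientation, a minimal sketch of how rows with this schema could be inspected with pandas; the parquet file name is a placeholder assumed for illustration, not something given in this dump:

```python
import pandas as pd

# Placeholder path: any parquet shard that follows the schema above.
df = pd.read_parquet("code_sample.parquet")

# Each row pairs one source file ("content") with repository metadata.
for _, row in df.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print("  size:", row["size"], "bytes | avg_line_length:", row["avg_line_length"],
          "| alphanum_fraction:", row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the source file
```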

Row 1 | hexsha: 4f5e45394a67bcb9a07150dcb0360887e8a8d81f | size: 2,347 | ext: bzl | lang: Python
- max_stars: closure/private/files_equal_test.bzl @ ribrdb/rules_closure, head 814465e8d46d5375415193f33d8abbb5ee5d6a6e, licenses ["Apache-2.0"], count 1, events 2019-08-25T15:11:13.000Z to 2019-08-25T15:11:13.000Z
- max_issues: same path, repo, head, and licenses; count 1, events 2022-03-01T18:29:50.000Z to 2022-03-01T18:29:50.000Z
- max_forks: same path, repo, head, and licenses; count and event datetimes null

content:
# Copyright 2016 The Closure Rules Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that two files contain the same data."""
def _impl(ctx):
if ctx.file.golden == ctx.file.actual:
fail("GOLDEN and ACTUAL should be different files")
ctx.file_action(
output=ctx.outputs.executable,
content="\n".join([
"#!/bin/bash",
"function checksum() {",
" if command -v openssl >/dev/null; then",
" openssl sha1 $1 | cut -f 2 -d ' '",
" elif command -v sha256sum >/dev/null; then",
" sha256sum $1 | cut -f 1 -d ' '",
" elif command -v shasum >/dev/null; then",
" cat $1 | shasum -a 256 | cut -f 1 -d ' '",
" else",
" echo please install openssl >&2",
" exit 1",
" fi",
"}",
"SUM1=$(checksum %s)" % ctx.file.golden.short_path,
"SUM2=$(checksum %s)" % ctx.file.actual.short_path,
"if [[ ${SUM1} != ${SUM2} ]]; then",
" echo ERROR: %s >&2" % ctx.attr.error_message,
" echo %s ${SUM1} >&2" % ctx.file.golden.short_path,
" echo %s ${SUM2} >&2" % ctx.file.actual.short_path,
" exit 1",
"fi",
]),
executable=True)
return struct(runfiles=ctx.runfiles([ctx.file.golden,
ctx.file.actual]))
files_equal_test = rule(
attrs = {
"golden": attr.label(
mandatory = True,
allow_files = True,
single_file = True),
"actual": attr.label(
mandatory = True,
allow_files = True,
single_file = True),
"error_message": attr.string(
default="FILES DO NOT HAVE EQUAL CONTENTS"),
},
implementation = _impl,
test = True)
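For context, a sketch of how this rule might be instantiated from a BUILD file; the load label and the target/file names are assumptions made for illustration, not taken from the repository:

```starlark
load("//closure/private:files_equal_test.bzl", "files_equal_test")

files_equal_test(
    name = "generated_matches_golden",        # hypothetical target name
    golden = "testdata/expected_output.js",   # hypothetical checked-in golden file
    actual = ":generated_output",             # hypothetical generated output
    error_message = "generated output differs from the golden file",
)
```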
avg_line_length: 36.671875 | max_line_length: 74 | alphanum_fraction: 0.563698

Row 2 | hexsha: b3b0231227343c2351e05199296c6d3a0def9baf | size: 1,187 | ext: py | lang: Python
- max_stars: src/gluonts/nursery/few_shot_prediction/src/meta/callbacks/__init__.py @ RingoIngo/gluon-ts, head 62fb20c36025fc969653accaffaa783671709564, licenses ["Apache-2.0"], count and event datetimes null
- max_issues and max_forks: same path, repo, head, and licenses; counts and event datetimes null

content:
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from .plot import (
ForecastPlotLoggerCallback,
ForecastSupportSetAttentionPlotLoggerCallback,
LossPlotLoggerCallback,
CheatLossPlotLoggerCallback,
MacroCRPSPlotCallback,
)
from .count import ParameterCountCallback
from .save import InitialSaveCallback
from .metric import QuantileMetricLoggerCallback
__all__ = [
"ForecastPlotLoggerCallback",
"ParameterCountCallback",
"InitialSaveCallback",
"ForecastSupportSetAttentionPlotLoggerCallback",
"LossPlotLoggerCallback",
"CheatLossPlotLoggerCallback",
"MacroCRPSPlotCallback",
"QuantileMetricLoggerCallback",
]
avg_line_length: 33.914286 | max_line_length: 75 | alphanum_fraction: 0.775063

Row 3 | hexsha: 4b1e99bcd9a6c625a8a8a52833d6188c4fd010f1 | size: 15,127 | ext: py | lang: Python
- max_stars: tools/genattrs.py @ rabinv/tsplex, head bfa2b229086a7ad57dbd9fa462edfd4c5e71ca77, licenses ["Apache-2.0"], count and event datetimes null
- max_issues and max_forks: same path, repo, head, and licenses; counts and event datetimes null

content:
#!/usr/bin/env python3
import argparse
import json
import pprint
import attr
import sqlite3
import os
import re
import sys
import jinja2
import logging
import itertools
from genchars import Char
from tagger import Tagger
@attr.s
class Attribute(object):
name: str = attr.ib()
group: str = attr.ib()
tagprefix: str = attr.ib()
stateprefix: str = attr.ib()
defaultstate: str = attr.ib(None)
states = attr.ib(factory=list)
tagid: int = attr.ib(default=-1)
@attr.s
class AttributeState(object):
name: str = attr.ib()
tagid: int = attr.ib()
class AttributeGen(object):
def __init__(self, chars):
self.tagger = Tagger(dict([(c.char, c) for c in chars]))
self.chars = chars
self.tags = ['INVALID']
self.tagmap = {}
self.tagidx = 1
self.transmap = dict([(c.name, c.sv.capitalize()) for c in self.chars])
self.transmap.update({
# These are fixed up to use the terms used in the textual
# transcriptions
'interaction_type_angling': 'Vinkelkontakt',
'interaction_type_diverging': 'Förs från varandra',
'interaction_type_entering': 'Entré / Flätas',
'interaction_type_converging': 'Förs mot varandra',
'interaction_type_crossing': 'Korskontakt',
'interaction_type_exchanging': 'Byter plats med varandra',
'interaction_type_hooking': 'Hakas',
'motion_type_circling': 'Förs i cirkel',
'motion_type_curving': 'Förs i båge',
'other_repetition': 'Upprepas',
'attitude_pointed_backward': 'Inåtriktad',
'attitude_turned_backward': 'Inåtvänd',
# Pseudo-tags generated by massage_tags
'hands_one': 'En hand',
'hands_two': 'Två händer, båda aktiva',
'hands_two_but_one_active': 'Två händer, en aktiv',
'position_hand': '(Handen)',
'position_unspecified': '(Neutrala läget)',
'contact_unspecified': '(Kontakt)',
'motion_type_moving': 'Förs',
'motion_type_fingers': 'Fingrarna',
'hands_any': 'En eller två händer',
'position_any': 'Alla lägen',
'handshape_any': 'Alla handformer',
'attitude_pointed_any': 'Alla riktningar',
'attitude_turned_any': 'Alla vridningar',
'handshape_same': '(Samma handform)',
'attitude_pointed_same': '(Samma riktning)',
'attitude_turned_same': '(Samma vridning)',
'attitude_pointed_symmetric': '(Symmetrisk riktning)',
'attitude_turned_symmetric': '(Symmetrisk vridning)',
# Titles for filters in UI
'': '',
'hands': 'Händer',
'position': 'Läge',
'right': 'Höger hand',
'left': 'Vänster hand',
'actions': 'Rörelse',
})
def massage_tag(self, t):
if t.endswith('modifier_medial_contact'):
return 'action-interaction_type_contact-modifier_medial_contact'
if t == 'action-interaction_type_contact':
return 'action-interaction_type_contact-contact_unspecified'
if t.startswith('handshape'):
return 'position_hand'
if t == 'left-handshape_none':
return None
# The states for these attributes are not complete. See validate().
if 'action-interaction_type_diverging-modifier' in t:
return 'action-interaction_type_diverging'
if 'position-relation' in t:
return None
parts = t.split('-')
if len(parts) > 1 and parts[0] == 'action':
if parts[1] in [
'motion_backward',
'motion_backward_short',
'motion_rightward',
'motion_rightward_short',
'motion_downward',
'motion_downward_short',
'motion_depthwise',
'motion_leftward',
'motion_leftward_short',
'motion_forward',
'motion_forward_short',
'motion_upward',
'motion_upward_short',
'motion_sideways',
'motion_vertically',
]:
direction = parts[1].split('_')[1]
if direction == 'backward':
direction = 'backwards'
parts[1] = 'motion_type_moving-modifier_' + direction
return '-'.join(parts)
elif parts[1] in [
'motion_type_bending',
'motion_type_playing',
'motion_type_strewing',
'motion_type_waving',
]:
parts[1] = 'motion_type_fingers-' + parts[1]
return '-'.join(parts)
elif parts[1] in [
'interaction_type_angling',
'interaction_type_crossing',
'interaction_type_entering',
'interaction_type_hooking'
]:
parts[1] = 'interaction_type_contact-' + parts[1]
return '-'.join(parts)
return t
def massage_tags(self, sign, tags):
tags = [self.massage_tag(t) for t in tags]
tags = [t for t in tags if t]
if sign.hands == 1:
# handshape_none is the fake left hand which the tagger adds for
# the attitude of position_arm_lower.
if sign.left and sign.left.shape != 'handshape_none':
tags.append('hands_two_but_one_active')
else:
tags.append('hands_one')
elif sign.hands == 2:
tags.append('hands_two')
if sign.left:
if sign.left.shape == sign.right.shape:
tags.extend(['left-handshape_same',
'right-handshape_same'])
if sign.left.pointed == sign.right.pointed:
tags.extend(['left-attitude_pointed_same',
'right-attitude_pointed_same'])
if sign.left.turned == sign.right.turned:
tags.extend(['left-attitude_turned_same',
'right-attitude_turned_same'])
symmetric = [
('forward', 'backward'),
('backward', 'forward'),
('leftward', 'rightward'),
('rightward', 'leftward'),
('upward', 'downward'),
('downward', 'upward'),
]
def direction(v):
return v.split('_')[-1]
if (direction(sign.left.pointed), direction(sign.right.pointed)) in symmetric:
tags.extend(['left-attitude_pointed_symmetric',
'right-attitude_pointed_symmetric'])
if (direction(sign.left.turned), direction(sign.right.turned)) in symmetric:
tags.extend(['left-attitude_turned_symmetric',
'right-attitude_turned_symmetric'])
if any(t for t in tags if 'motion_type_moving' in t):
tags.append('action-motion_type_moving')
if any(t for t in tags if 'fingers' in t):
tags.append('action-motion_type_fingers')
if any(t for t in tags if 'contact' in t):
tags.append('action-interaction_type_contact')
return sorted(list(set(tags)))
def tag(self, signs):
for sign in signs:
sign['tagids'] = []
idn = int(sign['id-nummer'])
segs = self.tagger.tag(sign)
if not segs:
continue
for seg in segs:
assert seg.tags
tags = self.massage_tags(seg, seg.tags)
tagids = []
for t in tags:
try:
v = self.tagmap[t]
except KeyError:
v = self.tagidx
self.tagmap[t] = self.tagidx
self.tags.append(t)
self.tagidx += 1
tagids.append(v)
sign['tagids'].append(tuple(tagids))
return signs
def get_attribs(self):
tags = list(self.tagmap.keys())
attribs = [
Attribute(name='hands',
group='',
defaultstate='hands_any',
tagprefix='hands_',
stateprefix=''),
Attribute(name='position',
group='',
defaultstate='position_any',
tagprefix='position_',
stateprefix=''),
Attribute(name='right',
group='right',
defaultstate='handshape_any',
tagprefix='right-handshape',
stateprefix='right-'),
Attribute(name='right',
group='right',
defaultstate='attitude_pointed_any',
tagprefix='right-attitude_pointed',
stateprefix='right-'),
Attribute(name='right',
group='right',
defaultstate='attitude_turned_any',
tagprefix='right-attitude_turned',
stateprefix='right-'),
Attribute(name='left',
group='left',
defaultstate='handshape_any',
tagprefix='left-handshape',
stateprefix='left-'),
Attribute(name='left',
group='left',
defaultstate='attitude_pointed_any',
tagprefix='left-attitude_pointed',
stateprefix='left-'),
Attribute(name='left',
group='left',
defaultstate='attitude_turned_any',
tagprefix='left-attitude_turned',
stateprefix='left-'),
]
actionattribs = []
for t in tags:
if not t.startswith('action-'):
continue
parts = t.split('-')
if len(parts) > 2:
continue
action = parts[1]
actionattribs.append(Attribute(name=action,
group='actions',
tagprefix=t,
stateprefix=action))
actionattribs = sorted(actionattribs, key=lambda a:self.transmap[a.name])
attribs += actionattribs
for attrib in attribs:
for t in [t for t in tags if t.startswith(attrib.tagprefix)]:
if 'short' in t and 'short' not in attrib.stateprefix:
print(attrib, t)
continue
tags.remove(t)
statename = t.replace('action-', '').replace(attrib.stateprefix, '').lstrip('-')
if not statename or 'any' in statename and not attrib.defaultstate:
assert attrib.tagid == -1
attrib.tagid = self.tagmap[t]
continue
state = AttributeState(name=statename, tagid=self.tagmap[t])
attrib.states.append(state)
attrib.states = sorted(attrib.states, key=lambda s:self.transmap[s.name])
assert not any(a for a in attribs if a.defaultstate and a.tagid != -1)
assert not any(a for a in attribs if not a.defaultstate and a.tagid == -1)
if tags:
pprint.pprint(tags)
assert not tags
return attribs
def validate(self, attribs, signs):
def signs_with_tag(tagid):
if tagid == -1:
return [s['id-nummer'] for s in signs if s['tagids']]
return [s['id-nummer'] for s in signs if [seg for seg in s['tagids'] if tagid in seg]]
for a in attribs:
if not a.states or a.group == 'left':
continue
assert(len(a.states) > 1)
match = sorted(signs_with_tag(a.tagid))
assert(len(match))
smatch = []
for st in a.states:
thismatch = signs_with_tag(st.tagid)
assert len(thismatch) != len(match)
smatch.extend(thismatch)
smatch = sorted(set(smatch))
if match != smatch:
print(set(match) - set(smatch))
print(a)
print(len(match))
print(len(smatch))
assert False
def gen(self, attribs):
env = jinja2.Environment(trim_blocks=False, lstrip_blocks=True,
undefined=jinja2.StrictUndefined)
template = env.from_string('''
// Auto-generated. Do not edit.
package `in`.rab.tsplex
object Attributes {
val attributes = arrayOf(
{% for attr in attribs %}
Attribute(
name = "{{ transmap[attr.name] }}",
group = "{{ transmap[attr.group] }}",
{% if attr.defaultstate %}
defaultStateName = "{{ transmap[attr.defaultstate] }}",
{% endif %}
tagId = {{ attr.tagid }},
states = arrayOf(
{% for state in attr.states %}
AttributeState(
name = "{{ transmap[state.name] }}",
tagId = {{ state.tagid }}
){% if not loop.last %},{% endif %}
{% endfor %}
)
){% if not loop.last %},{% endif %}
{% endfor %}
)
val redundantTagIds = arrayOf(
{% for tagid in redundant %}
{{ tagid }}{% if not loop.last %},{% endif %}
{% endfor %}
)
}
'''.lstrip())
redundant = ['-'.join(x) for x in itertools.product(('left', 'right'), \
['handshape_same',
'attitude_pointed_same',
'attitude_turned_same',
'attitude_pointed_symmetric',
'attitude_turned_symmetric',
])]
return template.render(attribs=attribs, transmap=self.transmap,
redundant=[self.tagmap[t] for t in redundant])
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--signs', default='signs.json')
parser.add_argument('--chars', default='chars.json')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
with open(args.signs, 'r') as f:
signs = json.load(f)
with open(args.chars, 'r') as f:
chars = [Char(**o) for o in json.load(f)]
attgen = AttributeGen(chars)
signs = attgen.tag(signs)
attribs = attgen.get_attribs()
attgen.validate(attribs, signs)
pprint.pprint(sorted(attgen.tagmap.items(), key=lambda t:t[0]))
print(attgen.gen(attribs))
if __name__ == '__main__':
main()
avg_line_length: 34.615561 | max_line_length: 98 | alphanum_fraction: 0.51147

Row 4 | hexsha: 426a8b72401b77bac3195d0222cf54d036b323a0 | size: 565 | ext: py | lang: Python
- max_stars: source/_static/code/scipy/bisection.py @ tuttugu-ryo/lecture-source-py, head 9ce84044c2cc421775ea63a004556d7ae3b4e504, licenses ["BSD-3-Clause"], count 56, events 2017-05-09T10:45:23.000Z to 2022-01-20T20:33:27.000Z
- max_issues: same path, repo, head, and licenses; count 7, events 2017-06-30T01:52:46.000Z to 2019-05-01T20:09:47.000Z
- max_forks: same path, repo, head, and licenses; count 117, events 2017-04-25T16:09:17.000Z to 2022-03-23T02:30:29.000Z

content:
def bisect(f, a, b, tol=10e-5):
"""
Implements the bisection root finding algorithm, assuming that f is a
real-valued function on [a, b] satisfying f(a) < 0 < f(b).
"""
lower, upper = a, b
while upper - lower > tol:
middle = 0.5 * (upper + lower)
# === if root is between lower and middle === #
if f(middle) > 0:
lower, upper = lower, middle
# === if root is between middle and upper === #
else:
lower, upper = middle, upper
return 0.5 * (upper + lower)
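A brief usage sketch (not part of the original file): approximating sqrt(2) as the root of f(x) = x**2 - 2 on [1, 2], which satisfies the documented precondition f(a) < 0 < f(b):

```python
# f(1) = -1 < 0 < 2 = f(2), so bisection applies on [1, 2].
f = lambda x: x**2 - 2
print(bisect(f, 1, 2))  # ~1.41421, i.e. sqrt(2) to within the default tolerance
```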
avg_line_length: 28.25 | max_line_length: 73 | alphanum_fraction: 0.520354

Row 5 | hexsha: b32d0c7abc26f50a9596acd6d16d263e34621876 | size: 5,606 | ext: py | lang: Python
- max_stars: demos/redemo.py @ houzw/knowledge-base-data, head 60771e8bf300227e1a26c9e77f56b09d23acd64a, licenses ["MIT"], count and event datetimes null
- max_issues: same path, repo, head, and licenses; count and event datetimes null
- max_forks: same path, repo, head, and licenses; count 1, events 2018-12-17T06:40:53.000Z to 2018-12-17T06:40:53.000Z

content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2018/12/18 9:58
import re
v = re.search("Minimum: \d+\.\d+", "Minimum: 0.000000 Default: 1.000000")
v2 = re.search("Default: (\w+|\d+\.\d+)", "Minimum: 0.000000 Default: xxx")
# print(v)
# print(v2)
ac = re.match("Available Choices: [\[\]0-9A-Za-z -]+[Default:]?",
"Available Choices: [0] Nearest Neighbour "
"[1] Bilinear Interpolation "
"[2] Bicubic Spline Interpolation"
" [3] B-Spline Interpolation Default: 0")
# print(ac.group())
all = re.findall("\[[0-9]\][ a-zA-Z-]+", ac.group().replace("Default:", ''))
# for l in all:
# print(l)
# print(re.search("(?<=\[)[0-9]", l).group())
# print(re.search("[a-zA-Z -]+", l).group())
# print(re.search("(?P<choice>(?<=\[)[0-9])(?P<des>[a-zA-Z -]+)", l))
# http://www.saga-gis.org/saga_tool_doc/7.0.0/shapes_tools_0.html
test = "28 Parameters: - 1. Available Choices: [0] Albers Equal Area " \
"[1] Azimuthal Equidistant [2] Airy [3] Aitoff [4] Mod. Stererographics of Alaska " \
"[5] Apian Globular I [6] August Epicycloidal [7] Bacon Globular [8] Bipolar conic of western hemisphere " \
"[9] Boggs Eumorphic [10] Bonne (Werner lat_1=90) [11] Cassini [12] Central Cylindrical [13] Equal Area Cylindrical" \
" [14] Chamberlin Trimetric [15] Collignon [16] Craster Parabolic (Putnins P4) [17] Denoyer Semi-Elliptical [18] Eckert I " \
"[19] Eckert II [20] Eckert III [21] Eckert IV [22] Eckert V [23] Eckert VI [24] Equidistant Cylindrical (Plate Caree) " \
"[25] Equidistant Conic [26] Euler [27] Extended Transverse Mercator [28] Fahey [29] Foucaut [30] Foucaut Sinusoidal " \
"[31] Gall (Gall Stereographic) [32] Geocentric [33] Geostationary Satellite View [34] Ginsburg VIII (TsNIIGAiK)"
# http://www.saga-gis.org/saga_tool_doc/7.0.0/grid_tools_20.html
test2 = "3 Fields: - 1. [8 byte floating point number] Value in Grid 1 " \
"- 2. [8 byte floating point number] Value in Grid 2 " \
"- 3. [8 byte floating point number] Resulting Value"
# print(re.search("^\d+ Parameters:",test).group())
# print(re.search("^\d+ Fields:",test2))
test3 = "Available Choices: [0] Difference to left neighbour " \
"[1] Difference to left neighbour (using a while loop) " \
"[2] Slope gradient to left neighbour [%%] " \
"[3] Slope gradient to left neighbour [Degree] Default: 0"
# print(re.search("Available Choices: [\[\]\w()% -]+[Default:]?", test3).group())
# print(re.search("Default: [\w\.]+", test3).group())
t = "Menu: Spatial and Geostatistics|Geographically Weighted Regression"
t = t.replace("Menu: ", '')
# print(re.split("\|", t))
name = "Tool 13: Reprojecting a shapes layer"
name = re.sub("^Tool [0-9]+: ", '', name)
print(name)
name2 = "Measured Points (PC)"
print(re.sub("\([a-zA-Z ]+\)", '', name2))
ttt = None
if ttt:
print(True)
else:
print(False)
st = " / ? abscd, () *"
st_r = re.sub('[()/?,*]', '', st)
print(st_r)
def get_choices(_options):
to_append_id = -1
to_remove_ids = []
for i, option in enumerate(_options):
if option['data_type'] == 'Choices':
to_append_id = i
print(to_append_id)
_options[to_append_id]['availableChoices'] = []
print(option['parameter_name'])
else:
if option['data_type'] == 'Choice':
to_remove_ids.append(i)
choice = dict()
print(option['parameter_name'])
choice['choice'] = option['parameter_name'].lower()
choice['description'] = option['explanation']
_options[to_append_id]['availableChoices'].append(choice)
else:
to_append_id = -1
	# Remove in reverse order; otherwise earlier removals would shift the indices of items still to be removed
	# to_remove_ids.sort(reverse=True)  # sort() modifies to_remove_ids in place and returns None!
	for i in sorted(to_remove_ids, reverse=True):
_options.pop(i)
return _options
options = [
{
"flag": "channel",
"parameter_name": "Selected Channel",
"data_type": "Int",
"explanation": []
}, {
"flag": "ram",
"parameter_name": "Available RAM (Mb)",
"data_type": "Int",
"explanation": []
},
{
"flag": "structype",
"parameter_name": "Structuring Element Type",
"data_type": "Choices",
"explanation": []
},
{
"flag": "structype ball",
"parameter_name": "Ball",
"data_type": "Choice",
"explanation": []
},
{
"flag": "structype cross",
"parameter_name": "Cross",
"data_type": "Choice",
"explanation": []
},
{
"flag": "structype.ball.xradius",
"parameter_name": "The Structuring Element X Radius",
"data_type": "Int",
"explanation": [
"The Structuring Element X Radius."
]
},
{
"flag": "structype.ball.yradius",
"parameter_name": "The Structuring Element Y Radius",
"data_type": "Int",
"explanation": [
"The Structuring Element Y Radius."
]
},
{
"flag": "filter",
"parameter_name": "Morphological Operation",
"data_type": "Choices",
"explanation": []
},
{
"flag": "filter dilate",
"parameter_name": "Dilate",
"data_type": "Choice",
"explanation": []
},
{
"flag": "filter erode",
"parameter_name": "Erode",
"data_type": "Choice",
"explanation": []
},
{
"flag": "filter opening",
"parameter_name": "Opening",
"data_type": "Choice",
"explanation": []
},
{
"flag": "filter closing",
"parameter_name": "Closing",
"data_type": "Choice",
"explanation": []
},
{
"flag": "inxml",
"parameter_name": "Load otb application from xml file",
"data_type": "XML input parameters file",
"explanation": []
},
{
"flag": "outxml",
"parameter_name": "Save otb application to xml file",
"data_type": "XML output parameters file",
"explanation": []
}
]
print(get_choices(options))
avg_line_length: 31.144444 | max_line_length: 132 | alphanum_fraction: 0.623796

Row 6 | hexsha: bfdd157bdf943db3f3a75084d8c0b4d34351821c | size: 74,486 | ext: py | lang: Python
- max_stars: python/friesian/test/bigdl/friesian/feature/test_table.py @ EmiCareOfCell44/BigDL, head 6278ee8eed09b5072da53dab3a99530cf5f69ba2, licenses ["Apache-2.0"], count 3, events 2021-07-14T01:28:47.000Z to 2022-03-02T01:16:32.000Z
- max_issues: same path @ liangs6212/BigDL, head 3c89ff7e8bbdc713110536c18099506811cd2b3a, licenses ["Apache-2.0"], count and event datetimes null
- max_forks: same path @ liangs6212/BigDL, head 3c89ff7e8bbdc713110536c18099506811cd2b3a, licenses ["Apache-2.0"], count and event datetimes null

content:
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
import os.path
import pytest
import hashlib
import operator
from unittest import TestCase
from pyspark.sql.functions import col, concat, max, min, array, udf, lit
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, ArrayType, \
DoubleType
from bigdl.orca import OrcaContext
from bigdl.friesian.feature import FeatureTable, StringIndex, TargetCode
from bigdl.dllib.nncontext import *
class TestTable(TestCase):
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
self.resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
def test_apply(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
feature_tbl = feature_tbl.fillna(0, "col_1")
# udf on single column
transform = lambda x: x + 1
feature_tbl = feature_tbl.apply("col_1", "new_col_1", transform, dtype="int")
col1_values = feature_tbl.select("col_1").df.rdd.flatMap(lambda x: x).collect()
updated_col1_values = feature_tbl.select("new_col_1").df.rdd.flatMap(lambda x: x).collect()
assert [v + 1 for v in col1_values] == updated_col1_values
# udf on multi columns
transform = lambda x: "xxxx"
feature_tbl = feature_tbl.apply(["col_2", "col_4", "col_5"], "out", transform)
out_values = feature_tbl.select("out").df.rdd.flatMap(lambda x: x).collect()
assert out_values == ["xxxx"] * len(out_values)
def test_apply_with_data(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
feature_tbl = feature_tbl.fillna(0, "col_1")
# udf on single column
y = {"ace": 1, "aa": 2}
transform = lambda x: y.get(x, 0)
feature_tbl = feature_tbl.apply("col_5", "out", transform, "int")
assert(feature_tbl.filter(col("out") == 2).size() == 3)
assert(feature_tbl.filter(col("out") == 1).size() == 1)
assert(feature_tbl.filter(col("out") == 0).size() == 1)
def test_fillna_int(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
filled_tbl = feature_tbl.fillna(5, ["col_2", "col_3"])
assert isinstance(filled_tbl, FeatureTable), "filled_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_2 is null").count() != 0 and feature_tbl \
.df.filter("col_3 is null").count() != 0, "feature_tbl should not be changed"
assert filled_tbl.df.filter("col_2 == 5").count() == 1, "col_2 null values should be " \
"filled with 5"
assert filled_tbl.df.filter("col_3 == 5").count() == 1, "col_3 null values should be " \
"filled with 5"
filled_tbl = feature_tbl.fillna(5, None)
assert filled_tbl.df.filter("col_2 == 5").count() == 1, "col_2 null values should be " \
"filled with 5"
assert filled_tbl.df.filter("col_3 == 5").count() == 1, "col_3 null values should be " \
"filled with 5"
with self.assertRaises(Exception) as context:
feature_tbl.fillna(0, ["col_2", "col_3", "col_8"])
self.assertTrue('do not exist in this Table' in str(context.exception))
def test_fillna_double(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
filled_tbl = feature_tbl.fillna(3.2, ["col_2", "col_3"])
assert isinstance(filled_tbl, FeatureTable), "filled_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_2 is null").count() != 0 and feature_tbl \
.df.filter("col_3 is null").count() != 0, "feature_tbl should not be changed"
assert filled_tbl.df.filter("col_2 is null").count() == 0, "col_2 null values should be " \
"filled"
assert filled_tbl.df.filter("col_3 is null").count() == 0, "col_3 null values should be " \
"filled"
filled_tbl = feature_tbl.fillna(5, ["col_2", "col_3"])
assert filled_tbl.df.filter("col_2 == 5").count() == 1, "col_2 null values should be " \
"filled with 5"
assert filled_tbl.df.filter("col_3 == 5").count() == 1, "col_3 null values should be " \
"filled with 5"
def test_fillna_long(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
filled_tbl = feature_tbl.fillna(3, ["col_1", "col_2", "col_3"])
assert isinstance(filled_tbl, FeatureTable), "filled_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_2 is null").count() != 0 and feature_tbl \
.df.filter("col_3 is null").count() != 0, "feature_tbl should not be changed"
assert filled_tbl.df.filter("col_1 is null").count() == 0, "col_1 null values should be " \
"filled"
assert filled_tbl.df.filter("col_2 is null").count() == 0, "col_2 null values should be " \
"filled"
assert filled_tbl.df.filter("col_3 is null").count() == 0, "col_3 null values should be " \
"filled"
def test_fillna_string(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
with self.assertRaises(Exception) as context:
feature_tbl.fillna(3.2, ["col_4", "col_5"])
self.assertTrue('numeric does not match the type of column col_4' in str(context.exception))
filled_tbl = feature_tbl.fillna("bb", ["col_4", "col_5"])
assert isinstance(filled_tbl, FeatureTable), "filled_tbl should be a FeatureTable"
assert filled_tbl.df.filter("col_4 is null").count() == 0, "col_4 null values should be " \
"filled"
assert filled_tbl.df.filter("col_5 is null").count() == 0, "col_5 null values should be " \
"filled"
def test_filter_by_frequency(self):
data = [("a", "b", 1),
("b", "a", 2),
("a", "bc", 3),
("c", "c", 2),
("b", "a", 2),
("ab", "c", 1),
("c", "b", 1),
("a", "b", 1)]
schema = StructType([StructField("A", StringType(), True),
StructField("B", StringType(), True),
StructField("C", IntegerType(), True)])
spark = OrcaContext.get_spark_session()
df = spark.createDataFrame(data, schema)
tbl = FeatureTable(df).filter_by_frequency(["A", "B", "C"])
assert tbl.to_spark_df().count() == 2, "the count of frequency >=2 should be 2"
def test_hash_encode(self):
spark = OrcaContext.get_spark_session()
data = [("a", "b", 1),
("b", "a", 2),
("a", "c", 3),
("c", "c", 2),
("b", "a", 1),
("a", "d", 1)]
schema = StructType([StructField("A", StringType(), True),
StructField("B", StringType(), True),
StructField("C", IntegerType(), True)])
df = spark.createDataFrame(data, schema)
tbl = FeatureTable(df)
hash_str = lambda x: hashlib.md5(str(x).encode('utf-8', 'strict')).hexdigest()
hash_int = lambda x: int(hash_str(x), 16) % 100
hash_value = []
for row in df.collect():
hash_value.append(hash_int(row[0]))
tbl_hash = []
for record in tbl.hash_encode(["A"], 100).to_spark_df().collect():
tbl_hash.append(int(record[0]))
assert(operator.eq(hash_value, tbl_hash)), "the hash encoded value should be equal"
def test_cross_hash_encode(self):
spark = OrcaContext.get_spark_session()
data = [("a", "b", "c", 1),
("b", "a", "d", 2),
("a", "c", "e", 3),
("c", "c", "c", 2),
("b", "a", "d", 1),
("a", "d", "e", 1)]
schema = StructType([StructField("A", StringType(), True),
StructField("B", StringType(), True),
StructField("C", StringType(), True),
StructField("D", IntegerType(), True)])
df = spark.createDataFrame(data, schema)
cross_hash_df = df.withColumn("A_B_C", concat("A", "B", "C"))
tbl = FeatureTable(df)
cross_hash_str = lambda x: hashlib.md5(str(x).encode('utf-8', 'strict')).hexdigest()
cross_hash_int = lambda x: int(cross_hash_str(x), 16) % 100
cross_hash_value = []
for row in cross_hash_df.collect():
cross_hash_value.append(cross_hash_int(row[4]))
tbl_cross_hash = []
for record in tbl.cross_hash_encode(["A", "B", "C"], 100).to_spark_df().collect():
tbl_cross_hash.append(int(record[4]))
assert(operator.eq(cross_hash_value, tbl_cross_hash)), "the crossed hash encoded value" \
"should be equal"
def test_gen_string_idx(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list = feature_tbl.gen_string_idx(["col_4", "col_5"], freq_limit=1)
assert string_idx_list[0].size() == 3, "col_4 should have 3 indices"
assert string_idx_list[1].size() == 2, "col_5 should have 2 indices"
with tempfile.TemporaryDirectory() as local_path:
for str_idx in string_idx_list:
str_idx.write_parquet(local_path)
str_idx_log = str_idx.log(["id"])
assert str_idx.df.filter("id == 1").count() == 1, "id in str_idx should = 1"
assert str_idx_log.df.filter("id == 1").count() == 0, "id in str_idx_log should " \
"!= 1"
assert os.path.isdir(local_path + "/col_4.parquet")
assert os.path.isdir(local_path + "/col_5.parquet")
new_col_4_idx = StringIndex.read_parquet(local_path + "/col_4.parquet")
assert "col_4" in new_col_4_idx.df.columns, "col_4 should be a column of new_col_4_idx"
with self.assertRaises(Exception) as context:
StringIndex.read_parquet(local_path + "/col_5.parquet", "col_4")
self.assertTrue('col_4 should be a column of the DataFrame' in str(context.exception))
def test_gen_string_idx_dict(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list = feature_tbl.gen_string_idx(["col_4", "col_5"], freq_limit={"col_4": 1,
"col_5": 3})
with self.assertRaises(Exception) as context:
feature_tbl.gen_string_idx(["col_4", "col_5"], freq_limit="col_4:1,col_5:3")
self.assertTrue('freq_limit only supports int, dict or None, but get str' in str(
context.exception))
assert string_idx_list[0].size() == 3, "col_4 should have 3 indices"
        assert string_idx_list[1].size() == 1, "col_5 should have 1 index"
def test_gen_string_idx_none(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list = feature_tbl.gen_string_idx(["col_4", "col_5"], freq_limit=None)
assert string_idx_list[0].size() == 3, "col_4 should have 3 indices"
assert string_idx_list[1].size() == 2, "col_5 should have 2 indices"
def test_gen_reindex_mapping(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list = feature_tbl.gen_string_idx(["col_4", "col_5"],
freq_limit={"col_4": 1, "col_5": 1},
order_by_freq=False)
tbl = feature_tbl.encode_string(["col_4", "col_5"], string_idx_list)
index_tbls = tbl.gen_reindex_mapping(["col_4", "col_5"], 1)
assert(index_tbls[0].size() == 3)
assert(index_tbls[1].size() == 2)
def test_gen_string_idx_union(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list1 = feature_tbl \
.gen_string_idx(["col_4", 'col_5'],
freq_limit=1)
assert string_idx_list1[0].size() == 3, "col_4 should have 3 indices"
assert string_idx_list1[1].size() == 2, "col_5 should have 2 indices"
new_tbl1 = feature_tbl.encode_string(['col_4', 'col_5'], string_idx_list1)
assert new_tbl1.max("col_5").to_list("max")[0] == 2, "col_5 max value should be 2"
string_idx_list2 = feature_tbl \
.gen_string_idx(["col_4", {"src_cols": ["col_4", "col_5"], "col_name": 'col_5'}],
freq_limit=1)
assert string_idx_list2[0].size() == 3, "col_4 should have 3 indices"
assert string_idx_list2[1].size() == 4, "col_5 should have 4 indices"
new_tbl2 = feature_tbl.encode_string(['col_4', 'col_5'], string_idx_list2)
assert new_tbl2.max("col_5").to_list("max")[0] == 4, "col_5 max value should be 4"
string_idx_3 = feature_tbl \
.gen_string_idx({"src_cols": ["col_4", "col_5"], "col_name": 'col_5'}, freq_limit=1)
assert string_idx_3.size() == 4, "col_5 should have 4 indices"
new_tbl3 = feature_tbl.encode_string('col_5', string_idx_3)
assert new_tbl3.max("col_5").to_list("max")[0] == 4, "col_5 max value should be 4"
def test_gen_string_idx_split(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
to_list_str = udf(lambda arr: ','.join(arr))
df = feature_tbl.dropna(['col_4', 'col_5']).df.withColumn("list1", array('col_4', 'col_5'))\
.withColumn("list1", to_list_str(col("list1")))
tbl = FeatureTable(df)
string_idx_1 = tbl.gen_string_idx("list1", do_split=True, sep=",", freq_limit=1)
assert string_idx_1.size() == 4, "list1 should have 4 indices"
new_tbl1 = tbl.encode_string('list1', string_idx_1, do_split=True, sep=",")
assert isinstance(new_tbl1.to_list("list1"), list), \
"encode list should return list of int"
new_tbl2 = tbl.encode_string(['list1'], string_idx_1, do_split=True, sep=",",
sort_for_array=True)
l1 = new_tbl2.to_list("list1")[0]
l2 = l1.copy()
l2.sort()
assert l1 == l2, "encode list with sort should sort"
new_tbl3 = tbl.encode_string(['list1'], string_idx_1, do_split=True, sep=",",
keep_most_frequent=True)
assert isinstance(new_tbl3.to_list("list1")[0], int), \
"encode list with keep most frequent should only keep one int"
def test_clip(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
clip_tbl = feature_tbl.clip(["col_1", "col_2", "col_3"], min=2, max=None)
assert isinstance(clip_tbl, FeatureTable), "clip_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_1 < 2").count() != 0 and feature_tbl \
.df.filter("col_2 < 2").count() != 0, "feature_tbl should not be changed"
assert clip_tbl.df.filter("col_1 < 2").count() == 0, "col_1 should >= 2"
assert clip_tbl.df.filter("col_2 < 2").count() == 0, "col_2 should >= 2"
assert clip_tbl.df.filter("col_3 < 2").count() == 0, "col_3 should >= 2"
with self.assertRaises(Exception) as context:
feature_tbl.clip(None, 2)
self.assertTrue('columns should be str or a list of str, but got None.'
in str(context.exception))
feature_tbl = FeatureTable.read_parquet(file_path)
clip_tbl = feature_tbl.clip(["col_1", "col_2", "col_3"], min=None, max=1)
assert isinstance(clip_tbl, FeatureTable), "clip_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_1 > 1").count() != 0 and feature_tbl \
.df.filter("col_2 > 1").count() != 0, "feature_tbl should not be changed"
assert clip_tbl.df.filter("col_1 > 1").count() == 0, "col_1 should <= 1"
assert clip_tbl.df.filter("col_2 > 1").count() == 0, "col_2 should <= 1"
assert clip_tbl.df.filter("col_3 > 1").count() == 0, "col_3 should <= 1"
feature_tbl = FeatureTable.read_parquet(file_path)
clip_tbl = feature_tbl.clip(["col_1", "col_2", "col_3"], min=0, max=1)
assert isinstance(clip_tbl, FeatureTable), "clip_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_1 > 1 or col_1 < 0").count() != 0 and feature_tbl \
.df.filter("col_2 > 1 or col_2 < 0").count() != 0, "feature_tbl should not be changed"
assert clip_tbl.df.filter("col_1 < 0").count() == 0, "col_1 should >= 0"
assert clip_tbl.df.filter("col_2 > 1").count() == 0, "col_2 should <= 1"
assert clip_tbl.df.filter("col_3 < 0 or col_3 > 1").count() == 0, "col_3 should >=0 " \
"and <= 1"
def test_dropna(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
dropped_tbl = feature_tbl.dropna(["col_1", "col_4"])
assert isinstance(dropped_tbl, FeatureTable), "dropped_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_1 is null").count() != 0 and feature_tbl\
.df.filter("col_4 is null").count() != 0, "feature_tbl should not be changed"
assert dropped_tbl.df.filter("col_1 is null").count() == 0, "col_1 null values should " \
"be dropped"
assert dropped_tbl.df.filter("col_4 is null").count() == 0, "col_4 null values should " \
"be dropped"
assert 0 < dropped_tbl.df.count() < feature_tbl.df.count(), "the number of rows should " \
"be decreased"
dropped_tbl = feature_tbl.dropna(["col_1", "col_4"], how="all")
assert dropped_tbl.df.filter("col_1 is null and col_4 is null").count() == 0, \
"col_1 and col_4 should not both have null values"
dropped_tbl = feature_tbl.dropna(["col_2", "col_4"], how="all")
assert dropped_tbl.df.filter("col_2 is null").count() > 0, \
"col_2 should still have null values after dropna with how=all"
dropped_tbl = feature_tbl.dropna(["col_2", "col_3", "col_5"], thresh=2)
assert dropped_tbl.df.filter("col_2 is null").count() > 0, \
"col_2 should still have null values after dropna with thresh=2"
assert dropped_tbl.df.filter("col_3 is null and col_5 is null").count() == 0, \
"col_3 and col_5 should not both have null values"
def test_fill_median(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
with self.assertRaises(Exception) as context:
feature_tbl.fill_median(["col_4", "col_5"])
self.assertTrue('col_4 with data type StringType is not supported' in
str(context.exception))
filled_tbl = feature_tbl.fill_median(["col_1", "col_2"])
assert isinstance(filled_tbl, FeatureTable), "filled_tbl should be a FeatureTable"
assert filled_tbl.df.filter("col_1 is null").count() == 0, "col_1 null values should be " \
"filled"
assert filled_tbl.df.filter("col_2 is null").count() == 0, "col_2 null values should be " \
"filled"
def test_filter(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
filtered_tbl = feature_tbl.filter(feature_tbl.col_1 == 1)
assert filtered_tbl.size() == 3, "Only 3 out of 5 rows has value 1 for col_1"
filtered_tbl2 = feature_tbl.filter(
(feature_tbl.col("col_1") == 1) & (feature_tbl.col_2 == 1))
assert filtered_tbl2.size() == 1, "Only 1 out of 5 rows has value 1 for col_1 and col_2"
def test_random_split(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
table1, table2 = feature_tbl.random_split([0.8, 0.2], seed=1)
cnt1, cnt2 = table1.size(), table2.size()
cnt = feature_tbl.size()
        assert cnt == cnt1 + cnt2, "size of full table should equal the sum of sizes of the split ones"
def test_rename(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
name_dict = {"col_1": "new_col1", "col_4": "new_col4"}
rename_tbl = feature_tbl.rename(name_dict)
cols = rename_tbl.df.columns
assert isinstance(rename_tbl, FeatureTable), "rename_tbl should be a FeatureTable"
assert "col_1" in feature_tbl.df.columns, "feature_tbl should not be changed"
assert "new_col1" in cols, "new_col1 should be a column of the renamed tbl."
assert "new_col4" in cols, "new_col4 should be a column of the renamed tbl."
def test_log(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
log_tbl = feature_tbl.log(["col_1", "col_2", "col_3"])
assert isinstance(log_tbl, FeatureTable), "log_tbl should be a FeatureTable"
assert feature_tbl.df.filter("col_1 == 1").count() != 0 and feature_tbl \
.df.filter("col_2 == 1").count() != 0, "feature_tbl should not be changed"
assert log_tbl.df.filter("col_1 == 1").count() == 0, "col_1 should != 1"
assert log_tbl.df.filter("col_2 == 1").count() == 0, "col_2 should != 1"
assert log_tbl.df.filter("col_3 == 1").count() == 0, "col_3 should != 1"
def test_merge(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
merged_tbl = feature_tbl.merge_cols(["col_1", "col_2", "col_3"], "int_cols")
assert "col_1" not in merged_tbl.df.columns, "col_1 shouldn't be a column of merged_tbl"
assert "int_cols" in merged_tbl.df.columns, "int_cols should be a column of merged_tbl"
assert "col_1" in feature_tbl.df.columns, "col_1 should be a column of feature_tbl"
def test_norm(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path).fillna(0, ["col_2", "col_3"])
normalized_tbl, min_max_dic = feature_tbl.min_max_scale(["col_2"])
max_value = normalized_tbl.df.select("col_2") \
.agg(max(col("col_2")).alias("max")) \
.rdd.map(lambda row: row['max']).collect()[0]
min_value = normalized_tbl.df.select("col_2") \
.agg(min(col("col_2")).alias("min")) \
.rdd.map(lambda row: row['min']).collect()[0]
assert max_value <= 1, "col_2 shouldn't be more than 1 after normalization"
assert min_value >= 0, "col_2 shouldn't be less than 0 after normalization"
tbl2 = FeatureTable(feature_tbl.df.withColumn("col2-col3", array(["col_2", "col_3"])))
normalized_tbl2, min_max_dic2 = tbl2.min_max_scale(["col_2", "col2-col3"])
test_file_path = os.path.join(self.resource_path, "parquet/data3.parquet")
test_tbl = FeatureTable.read_parquet(test_file_path).fillna(0, ["col_2", "col_3"])
scaled = test_tbl.transform_min_max_scale(["col_2"], min_max_dic)
max_value = scaled.df.select("col_2") \
.agg(max(col("col_2")).alias("max")) \
.rdd.map(lambda row: row['max']).collect()[0]
min_value = scaled.df.select("col_2") \
.agg(min(col("col_2")).alias("min")) \
.rdd.map(lambda row: row['min']).collect()[0]
assert max_value <= 1, "col_2 shouldn't be more than 1 after normalization"
assert min_value >= 0, "col_2 shouldn't be less than 0 after normalization"
test_tbl2 = FeatureTable(test_tbl.df.withColumn("col2-col3", array(["col_2", "col_3"])))
scaled2 = test_tbl2.transform_min_max_scale(["col_2", "col2-col3"], min_max_dic2)
max_value = scaled2.df.select("col2-col3") \
.agg(max(col("col2-col3")).alias("max")) \
.rdd.map(lambda row: row['max']).collect()[0]
min_value = scaled2.df.select("col2-col3") \
.agg(min(col("col2-col3")).alias("min")) \
.rdd.map(lambda row: row['min']).collect()[0]
assert max_value[0] <= 1 and max_value[1] <= 1, \
"col2-col3 shouldn't be more than 1 after normalization"
assert min_value[0] >= 0 and min_value[1] >= 0, \
"col2-col3 shouldn't be less than 0 after normalization"
def test_cross(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path).fillna(0, ["col_2", "col_3"])
crossed_tbl = feature_tbl.cross_columns([["col_2", "col_3"]], [100])
assert "col_2_col_3" in crossed_tbl.df.columns, "crossed column is not created"
max_value = crossed_tbl.df.select("col_2_col_3") \
.agg(max(col("col_2_col_3")).alias("max")) \
.rdd.map(lambda row: row['max']).collect()[0]
min_value = crossed_tbl.df.select("col_2_col_3") \
.agg(min(col("col_2_col_3")).alias("min")) \
.rdd.map(lambda row: row['min']).collect()[0]
assert max_value <= 100, "cross value shouldn't be more than 100 after cross"
assert min_value > 0, "cross value shouldn't be less than 0 after cross"
def test_add_negative_items(self):
spark = OrcaContext.get_spark_session()
data = [("jack", 1, "2019-07-01 12:01:19.000"),
("jack", 2, "2019-08-01 12:01:19.000"),
("jack", 3, "2019-09-01 12:01:19.000"),
("alice", 4, "2019-09-01 12:01:19.000"),
("alice", 5, "2019-10-01 12:01:19.000"),
("alice", 6, "2019-11-01 12:01:19.000")]
schema = StructType([
StructField("name", StringType(), True),
StructField("item", IntegerType(), True),
StructField("time", StringType(), True)
])
df = spark.createDataFrame(data=data, schema=schema)
tbl = FeatureTable(df).add_negative_samples(10)
dft = tbl.df
assert tbl.size() == 12
assert dft.filter("label == 1").count() == 6
assert dft.filter("label == 0").count() == 6
def test_add_hist_seq(self):
spark = OrcaContext.get_spark_session()
data = [("jack", 1, "2019-07-01 12:01:19.000"),
("jack", 2, "2019-08-01 12:01:19.000"),
("jack", 3, "2019-09-01 12:01:19.000"),
("jack", 4, "2019-07-02 12:01:19.000"),
("jack", 5, "2019-08-03 12:01:19.000"),
("jack", 6, "2019-07-04 12:01:19.000"),
("jack", 7, "2019-08-05 12:01:19.000"),
("alice", 4, "2019-09-01 12:01:19.000"),
("alice", 5, "2019-10-01 12:01:19.000"),
("alice", 6, "2019-11-01 12:01:19.000")]
schema = StructType([StructField("name", StringType(), True),
StructField("item", IntegerType(), True),
StructField("time", StringType(), True)])
df = spark.createDataFrame(data=data, schema=schema)
df = df.withColumn("ts", col("time").cast("timestamp").cast("long"))
tbl = FeatureTable(df.select("name", "item", "ts")) \
.add_hist_seq(["item"], "name", "ts", 1, 4)
tbl2 = FeatureTable(df.select("name", "item", "ts")) \
.add_hist_seq(["item"], "name", "ts", 1, 4, 1)
assert tbl.size() == 8
assert tbl.df.filter(col("name") == "alice").count() == 2
assert tbl.df.filter("name like '%jack'").count() == 6
assert "item_hist_seq" in tbl.df.columns
assert tbl2.size() == 2
assert tbl2.df.filter(col("name") == "alice").count() == 1
assert tbl2.df.filter("name like '%jack'").count() == 1
assert "item_hist_seq" in tbl2.df.columns
def test_gen_neg_hist_seq(self):
spark = OrcaContext.get_spark_session()
sc = OrcaContext.get_spark_context()
data = [
("jack", [1, 2, 3, 4, 5]),
("alice", [4, 5, 6, 7, 8]),
("rose", [1, 2])]
schema = StructType([
StructField("name", StringType(), True),
StructField("item_hist_seq", ArrayType(IntegerType()), True)])
df = spark.createDataFrame(data, schema)
df2 = sc \
.parallelize([(1, 0), (2, 0), (3, 0), (4, 1), (5, 1), (6, 1), (7, 2), (8, 2), (9, 2)]) \
.toDF(["item", "category"]).withColumn("item", col("item").cast("Integer")) \
.withColumn("category", col("category").cast("Integer"))
tbl = FeatureTable(df)
tbl = tbl.add_neg_hist_seq(9, "item_hist_seq", 4)
assert tbl.df.select("neg_item_hist_seq").count() == 3
def test_add_value_features(self):
spark = OrcaContext.get_spark_session()
sc = OrcaContext.get_spark_context()
data = [
("jack", [1, 2, 3, 4, 5]),
("alice", [4, 5, 6, 7, 8]),
("rose", [1, 2])]
schema = StructType([
StructField("name", StringType(), True),
StructField("item_hist_seq", ArrayType(IntegerType()), True)])
df = spark.createDataFrame(data, schema)
df.filter("name like '%alice%'").show()
df2 = sc \
.parallelize([(0, 0), (1, 0), (2, 0), (3, 0), (4, 1), (5, 1), (6, 1), (8, 2), (9, 2)]) \
.toDF(["item", "category"]).withColumn("item", col("item").cast("Integer")) \
.withColumn("category", col("category").cast("Integer"))
tbl = FeatureTable(df)
tbl2 = tbl.add_neg_hist_seq(9, "item_hist_seq", 4)
tbl3 = tbl2.add_value_features(["item_hist_seq", "neg_item_hist_seq"],
FeatureTable(df2), "item", "category")
assert tbl3.df.select("category_hist_seq").count() == 3
assert tbl3.df.select("neg_category_hist_seq").count() == 3
assert tbl3.df.filter("name like '%alice%'").select("neg_category_hist_seq").count() == 1
assert tbl3.df.filter("name == 'rose'").select("neg_category_hist_seq").count() == 1
def test_reindex(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
string_idx_list = feature_tbl.gen_string_idx(["col_4", "col_5"],
freq_limit={"col_4": 1, "col_5": 1},
order_by_freq=False)
tbl_with_index = feature_tbl.encode_string(["col_4", "col_5"], string_idx_list)
index_tbls = tbl_with_index.gen_reindex_mapping(["col_4", "col_5"], 2)
reindexed = tbl_with_index.reindex(["col_4", "col_5"], index_tbls)
assert(reindexed.filter(col("col_4") == 0).size() == 2)
assert(reindexed.filter(col("col_4") == 1).size() == 2)
assert(reindexed.filter(col("col_5") == 0).size() == 1)
assert(reindexed.filter(col("col_5") == 1).size() == 3)
def test_pad(self):
spark = OrcaContext.get_spark_session()
data = [
("jack", [1, 2, 3, 4, 5], [[1, 2, 3], [1, 2, 3]]),
("alice", [4, 5, 6, 7, 8], [[1, 2, 3], [1, 2, 3]]),
("rose", [1, 2], [[1, 2, 3]])]
schema = StructType([StructField("name", StringType(), True),
StructField("list", ArrayType(IntegerType()), True),
StructField("matrix", ArrayType(ArrayType(IntegerType())))])
df = spark.createDataFrame(data, schema)
tbl1 = FeatureTable(df).pad(["list", "matrix"], seq_len=4)
dft1 = tbl1.df
tbl2 = FeatureTable(df).pad(cols=["list", "matrix"], mask_cols=["list"], seq_len=4)
assert dft1.filter("size(matrix) = 4").count() == 3
assert dft1.filter("size(list) = 4").count() == 3
assert tbl2.df.filter("size(list_mask) = 4").count() == 3
assert tbl2.df.filter("size(list_mask) = 2").count() == 0
assert "list_mask" in tbl2.df.columns
def test_median(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
with self.assertRaises(Exception) as context:
feature_tbl.median(["col_4", "col_5"])
self.assertTrue('col_4 with data type StringType is not supported' in
str(context.exception))
median_tbl = feature_tbl.median(["col_1", "col_2", "col_3"])
assert isinstance(median_tbl, FeatureTable), "median_tbl should be a FeatureTable"
assert median_tbl.df.count() == 3, "the number of rows of median_tbl should be equal to " \
"the number of specified columns"
assert median_tbl.df.filter("column == 'col_1'").count() == 1, "col_1 should exist in " \
"'column' of median_tbl"
assert median_tbl.df.filter("column == 'col_2'").filter("median == 1.0").count() == 1, \
"the median of col_2 should be 1.0"
def test_cast(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8),
("alice", "34", 25, 9),
("rose", "25344", 23, 10)]
schema = StructType([StructField("name", StringType(), True),
StructField("a", StringType(), True),
StructField("b", IntegerType(), True),
StructField("c", IntegerType(), True)])
df = spark.createDataFrame(data, schema)
tbl = FeatureTable(df)
tbl = tbl.cast("a", "int")
        assert dict(tbl.df.dtypes)['a'] == "int", "column a should now be cast to integer type"
        tbl = tbl.cast("a", "float")
        assert dict(tbl.df.dtypes)['a'] == "float", "column a should now be cast to float type"
        tbl = tbl.cast(["b", "c"], "double")
        assert dict(tbl.df.dtypes)['b'] == dict(tbl.df.dtypes)['c'] == "double", \
            "columns b and c should now be cast to double type"
tbl = tbl.cast(None, "float")
assert dict(tbl.df.dtypes)['name'] == dict(tbl.df.dtypes)['a'] == dict(tbl.df.dtypes)['b'] \
== dict(tbl.df.dtypes)['c'] == "float", \
"all the columns should now be cast to float type"
with self.assertRaises(Exception) as context:
tbl = tbl.cast("a", "notvalid")
self.assertTrue(
"type should be string, boolean, int, long, short, float, double."
in str(context.exception))
def test_select(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
select_tbl = feature_tbl.select("col_1", "col_2")
assert "col_1" in select_tbl.df.columns, "col_1 shoul be selected"
assert "col_2" in select_tbl.df.columns, "col_2 shoud be selected"
assert "col_3" not in select_tbl.df.columns, "col_3 shoud not be selected"
assert feature_tbl.size() == select_tbl.size(), \
"the selected table should have the same rows"
with self.assertRaises(Exception) as context:
feature_tbl.select()
self.assertTrue("cols should be str or a list of str, but got None."
in str(context.exception))
def test_create_from_dict(self):
indices = {'a': 1, 'b': 2, 'c': 3}
col_name = 'letter'
tbl = StringIndex.from_dict(indices, col_name)
assert 'id' in tbl.df.columns, "id should be one column in the stringindex"
assert 'letter' in tbl.df.columns, "letter should be one column in the stringindex"
assert tbl.size() == 3, "the StringIndex should have three rows"
with self.assertRaises(Exception) as context:
StringIndex.from_dict(indices, None)
self.assertTrue("col_name should be str, but get None"
in str(context.exception))
with self.assertRaises(Exception) as context:
StringIndex.from_dict(indices, 12)
self.assertTrue("col_name should be str, but get int"
in str(context.exception))
with self.assertRaises(Exception) as context:
StringIndex.from_dict([indices], col_name)
self.assertTrue("indices should be dict, but get list"
in str(context.exception))
def test_encode_string_from_dict(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8),
("alice", "34", 25, 9),
("rose", "25344", 23, 10)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", IntegerType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
columns = ["name", "num"]
indices = []
indices.append({"jack": 1, "alice": 2, "rose": 3})
indices.append({"123": 3, "34": 1, "25344": 2})
tbl = tbl.encode_string(columns, indices)
assert 'name' in tbl.df.columns, "name should be still in the columns"
assert 'num' in tbl.df.columns, "num should be still in the columns"
assert tbl.df.where(tbl.df.age == 14).select("name").collect()[0]["name"] == 1, \
"the first row of name should be 1"
assert tbl.df.where(tbl.df.height == 10).select("num").collect()[0]["num"] == 2, \
"the third row of num should be 2"
def test_write_csv(self):
spark = OrcaContext.get_spark_session()
data = [("jack", 14, 8),
("alice", 25, 9),
("rose", 23, 10)]
schema = StructType([StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", IntegerType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
directory = "write.csv"
if os.path.exists("write.csv"):
shutil.rmtree("write.csv")
tbl.write_csv(directory, mode="overwrite", header=True, num_partitions=1)
assert os.path.exists("write.csv"), "files not write"
result = FeatureTable(spark.read.csv(directory, header=True))
assert isinstance(result, FeatureTable)
assert result.size() == 3, "the size of result should be 3"
assert result.filter("age == 23").size() == 1, "wrong age"
assert result.filter("name == 'jack'").size() == 1, "wrong name"
assert result.filter("name == 'alice'").size() == 1, "wrong name"
shutil.rmtree(directory)
def test_concat(self):
spark = OrcaContext.get_spark_session()
data1 = [("jack", 1)]
data2 = [(2, "alice")]
data3 = [("amy", 3, 50)]
schema1 = StructType([StructField("name", StringType(), True),
StructField("id", IntegerType(), True)])
schema2 = StructType([StructField("id", IntegerType(), True),
StructField("name", StringType(), True)])
schema3 = StructType([StructField("name", StringType(), True),
StructField("id", IntegerType(), True),
StructField("weight", IntegerType(), True)])
tbl1 = FeatureTable(spark.createDataFrame(data1, schema1))
tbl2 = FeatureTable(spark.createDataFrame(data2, schema2))
tbl3 = FeatureTable(spark.createDataFrame(data3, schema3))
tbl = tbl1.concat(tbl1)
assert tbl.size() == 2
tbl = tbl1.concat(tbl1, distinct=True)
assert tbl.size() == 1
tbl = tbl1.concat(tbl2)
assert tbl.filter("name == 'jack'").size() == 1
assert tbl.filter("name == 'alice'").size() == 1
tbl = tbl1.concat(tbl3, mode="inner")
assert tbl.df.schema.names == ["name", "id"]
tbl = tbl1.concat(tbl3, mode="outer")
assert tbl.df.schema.names == ["name", "id", "weight"]
assert tbl.fillna(0, "weight").filter("weight == 0").size() == 1
tbl = tbl1.concat([tbl1, tbl2, tbl3])
assert tbl.size() == 4
assert tbl.distinct().size() == 3
tbl = tbl1.concat([tbl1, tbl2, tbl3], distinct=True)
assert tbl.size() == 3
def test_drop_duplicates(self):
spark = OrcaContext.get_spark_session()
schema = StructType([StructField("name", StringType(), True),
StructField("grade", StringType(), True),
StructField("number", IntegerType(), True)])
data = [("jack", "a", 1), ("jack", "a", 3), ("jack", "b", 2), ("amy", "a", 2),
("amy", "a", 5), ("amy", "a", 4)]
tbl = FeatureTable(spark.createDataFrame(data, schema))
tbl2 = tbl.drop_duplicates(subset=['name', 'grade'], sort_cols='number', keep='min')
tbl2.df.show()
assert tbl2.size() == 3
assert tbl2.df.filter((tbl2.df.name == 'jack') & (tbl2.df.grade == 'a'))\
.select("number").collect()[0]["number"] == 1
assert tbl2.df.filter((tbl2.df.name == 'jack') & (tbl2.df.grade == 'b'))\
.select("number").collect()[0]["number"] == 2
assert tbl2.df.filter((tbl2.df.name == 'amy') & (tbl2.df.grade == 'a'))\
.select("number").collect()[0]["number"] == 2
tbl3 = tbl.drop_duplicates(subset=['name', 'grade'], sort_cols='number', keep='max')
tbl3.df.show()
assert tbl3.size() == 3
        assert tbl3.df.filter((tbl3.df.name == 'jack') & (tbl3.df.grade == 'a'))\
            .select("number").collect()[0]["number"] == 3
        assert tbl3.df.filter((tbl3.df.name == 'jack') & (tbl3.df.grade == 'b'))\
            .select("number").collect()[0]["number"] == 2
        assert tbl3.df.filter((tbl3.df.name == 'amy') & (tbl3.df.grade == 'a'))\
            .select("number").collect()[0]["number"] == 5
tbl4 = tbl.drop_duplicates(subset=None, sort_cols='number', keep='max')
tbl4.df.show()
assert tbl4.size() == 6
tbl5 = tbl.drop_duplicates(subset=['name', 'grade'], sort_cols=None, keep='max')
tbl5.df.show()
assert tbl5.size() == 3
tbl6 = tbl.drop_duplicates(subset=['name'], sort_cols=["grade", "number"], keep='max')
assert tbl6.size() == 2
tbl6.df.show()
assert tbl6.df.filter((tbl6.df.name == 'jack') & (tbl6.df.grade == 'b')
& (tbl6.df.number == 2))\
.select("number").collect()[0]["number"] == 2
assert tbl6.df.filter((tbl6.df.name == 'amy') & (tbl6.df.grade == 'a')
& (tbl6.df.number == 5))\
.select("number").collect()[0]["number"] == 5
def test_join(self):
spark = OrcaContext.get_spark_session()
schema = StructType([StructField("name", StringType(), True),
StructField("id", IntegerType(), True)])
data = [("jack", 1), ("jack", 2), ("jack", 3)]
tbl = FeatureTable(spark.createDataFrame(data, schema))
tbl2 = FeatureTable(spark.createDataFrame(data, schema))
tbl = tbl.join(tbl2, on="id", lsuffix="_l", rsuffix="_r")
assert "name_l" in tbl.df.schema.names
assert "id" in tbl.df.schema.names
assert "name_r" in tbl.df.schema.names
def test_cut_bins(self):
spark = OrcaContext.get_spark_session()
values = [("a", 23), ("b", 45), ("c", 10), ("d", 60), ("e", 56), ("f", 2),
("g", 25), ("h", 40), ("j", 33)]
tbl = FeatureTable(spark.createDataFrame(values, ["name", "ages"]))
splits = [6, 18, 60]
labels = ["infant", "minor", "adult", "senior"]
# test drop false, name defined
new_tbl = tbl.cut_bins(bins=splits, columns="ages", labels=labels,
out_cols="age_bucket", drop=False)
assert "age_bucket" in new_tbl.columns
assert "ages" in new_tbl.columns
assert new_tbl.df.select("age_bucket").rdd.flatMap(lambda x: x).collect() ==\
["adult", "adult", "minor", "senior", "adult", "infant", "adult", "adult", "adult"]
# test out_col equal to input column
new_tbl = tbl.cut_bins(bins=splits, columns="ages", labels=labels,
out_cols="ages", drop=True)
assert "ages" in new_tbl.columns
assert new_tbl.df.select("ages").rdd.flatMap(lambda x: x).collect() == \
["adult", "adult", "minor", "senior", "adult", "infant", "adult", "adult", "adult"]
# test name not defined
new_tbl = tbl.cut_bins(bins=splits, columns="ages", labels=labels, drop=True)
assert "ages_bin" in new_tbl.columns
assert new_tbl.df.select("ages_bin").rdd.flatMap(lambda x: x).collect() == \
["adult", "adult", "minor", "senior", "adult", "infant", "adult", "adult", "adult"]
# test integer bins
new_tbl = tbl.cut_bins(bins=2, columns="ages", labels=labels, drop=False)
assert "ages_bin" in new_tbl.columns
assert new_tbl.df.select("ages_bin").rdd.flatMap(lambda x: x).collect() \
== ["minor", "adult", "minor", "senior", "adult", "minor", "minor", "adult", "adult"]
# test label is None
new_tbl = tbl.cut_bins(bins=4, columns="ages", drop=True)
assert "ages_bin" in new_tbl.columns
assert new_tbl.df.select("ages_bin").rdd.flatMap(lambda x: x).collect() \
== [2, 3, 1, 5, 4, 1, 2, 3, 3]
# test multiple columns
values = [("a", 23, 23), ("b", 45, 45), ("c", 10, 10), ("d", 60, 60), ("e", 56, 56),
("f", 2, 2), ("g", 25, 25), ("h", 40, 40), ("j", 33, 33)]
tbl = FeatureTable(spark.createDataFrame(values, ["name", "ages", "number"]))
splits = [6, 18, 60]
splits2 = [6, 18, 60]
labels = ["infant", "minor", "adult", "senior"]
new_tbl = tbl.cut_bins(bins={'ages': splits, 'number': splits2}, columns=["ages", 'number'],
labels={'ages': labels, 'number': labels}, out_cols=None, drop=False)
assert "ages_bin" in new_tbl.columns
assert "ages" in new_tbl.columns
assert "number_bin" in new_tbl.columns
assert new_tbl.df.select("ages_bin").rdd.flatMap(lambda x: x).collect() ==\
["adult", "adult", "minor", "senior", "adult", "infant", "adult", "adult", "adult"]
def test_columns(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
col_names = feature_tbl.columns
assert isinstance(col_names, list), "col_names should be a list of strings"
assert col_names == ["col_1", "col_2", "col_3", "col_4", "col_5"], \
"column names are incorrenct"
def test_get_stats(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8.5),
("alice", "34", 25, 9.7),
("rose", "25344", 23, 10.0)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
columns = ["age", "height"]
# test str
statistics = tbl.get_stats(columns, "min")
assert len(statistics) == 2, "the dict should contain two statistics"
assert statistics["age"] == 14, "the min value of age is not correct"
assert statistics["height"] == 8.5, "the min value of height is not correct"
columns = ["age", "height"]
# test dict
statistics = tbl.get_stats(columns, {"age": "max", "height": "avg"})
assert len(statistics) == 2, "the dict should contain two statistics"
assert statistics["age"] == 25, "the max value of age is not correct"
assert statistics["height"] == 9.4, "the avg value of height is not correct"
# test list
statistics = tbl.get_stats(columns, ["min", "max"])
assert len(statistics) == 2, "the dict should contain two statistics"
assert statistics["age"][0] == 14, "the min value of age is not correct"
assert statistics["age"][1] == 25, "the max value of age is not correct"
assert statistics["height"][0] == 8.5, "the min value of height is not correct"
assert statistics["height"][1] == 10.0, "the max value of height is not correct"
# test dict of list
statistics = tbl.get_stats(columns, {"age": ["min", "max"], "height": ["min", "avg"]})
assert len(statistics) == 2, "the dict should contain two statistics"
assert statistics["age"][0] == 14, "the min value of age is not correct"
assert statistics["age"][1] == 25, "the max value of age is not correct"
assert statistics["height"][0] == 8.5, "the min value of height is not correct"
assert statistics["height"][1] == 9.4, "the max value of height is not correct"
statistics = tbl.get_stats(None, "min")
assert len(statistics) == 2, "the dict should contain two statistics"
assert statistics["age"] == 14, "the min value of age is not correct"
assert statistics["height"] == 8.5, "the min value of height is not correct"
def test_min(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8.5),
("alice", "34", 25, 9.7),
("rose", "25344", 23, 10.0)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
columns = ["age", "height"]
min_result = tbl.min(columns)
assert min_result.to_list("min") == [14, 8.5], \
"the min value for age and height is not correct"
def test_max(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8.5),
("alice", "34", 25, 9.7),
("rose", "25344", 23, 10.0)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
columns = ["age", "height"]
        max_result = tbl.max(columns)
        assert max_result.to_list("max") == [25, 10.0], \
"the maximum value for age and height is not correct"
def test_to_list(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8.5, [0, 0]),
("alice", "34", 25, 9.6, [1, 1]),
("rose", "25344", 23, 10.0, [2, 2])]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("array", ArrayType(IntegerType()), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
list1 = tbl.to_list("name")
list2 = tbl.to_list("num")
list3 = tbl.to_list("age")
list4 = tbl.to_list("height")
list5 = tbl.to_list("array")
assert list1 == ["jack", "alice", "rose"], "the result of name is not correct"
assert list2 == ["123", "34", "25344"], "the result of num is not correct"
assert list3 == [14, 25, 23], "the result of age is not correct"
assert list4 == [8.5, 9.6, 10.0], "the result of height is not correct"
assert list5 == [[0, 0], [1, 1], [2, 2]], "the result of array is not correct"
def test_to_dict(self):
spark = OrcaContext.get_spark_session()
# test the case the column of key is unique
data = [("jack", "123", 14),
("alice", "34", 25),
("rose", "25344", 23)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
dictionary = tbl.to_dict()
print(dictionary)
assert dictionary["name"] == ['jack', 'alice', 'rose']
def test_to_pandas(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14),
("alice", "34", 25),
("rose", "25344", 23)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
pddf = tbl.to_pandas()
assert(pddf["name"].values.tolist() == ['jack', 'alice', 'rose'])
def test_from_pandas(self):
import pandas as pd
data = [['tom', 10], ['nick', 15], ['juli', 14]]
pddf = pd.DataFrame(data, columns=['Name', 'Age'])
tbl = FeatureTable.from_pandas(pddf)
assert(tbl.size() == 3)
def test_sort(self):
import pandas as pd
data = [['tom', 10], ['nick', 15], ['juli', 14]]
pddf = pd.DataFrame(data, columns=['Name', 'Age'])
tbl = FeatureTable.from_pandas(pddf)
tbl = tbl.sort("Age", ascending=False)
assert(tbl.select("Name").to_list("Name") == ["nick", "juli", "tom"])
tbl = tbl.sort("Name")
assert(tbl.select("Name").to_list("Name") == ["juli", "nick", "tom"])
def test_add(self):
spark = OrcaContext.get_spark_session()
data = [("jack", "123", 14, 8.5),
("alice", "34", 25, 9.6),
("rose", "25344", 23, 10.0)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema))
columns = ["age", "height"]
new_tbl = tbl.add(columns, 1.5)
new_list = new_tbl.df.take(3)
assert len(new_list) == 3, "new_tbl should have 3 rows"
        assert new_list[0]['age'] == 15.5, "the age of jack should increase by 1.5"
        assert new_list[0]['height'] == 10, "the height of jack should increase by 1.5"
        assert new_list[1]['age'] == 26.5, "the age of alice should increase by 1.5"
        assert new_list[1]['height'] == 11.1, "the height of alice should increase by 1.5"
        assert new_list[2]['age'] == 24.5, "the age of rose should increase by 1.5"
        assert new_list[2]['height'] == 11.5, "the height of rose should increase by 1.5"
new_tbl = tbl.add(columns, -1)
new_list = new_tbl.df.take(3)
assert len(new_list) == 3, "new_tbl should have 3 rows"
        assert new_list[0]['age'] == 13, "the age of jack should decrease by 1"
        assert new_list[0]['height'] == 7.5, "the height of jack should decrease by 1"
        assert new_list[1]['age'] == 24, "the age of alice should decrease by 1"
        assert new_list[1]['height'] == 8.6, "the height of alice should decrease by 1"
        assert new_list[2]['age'] == 22, "the age of rose should decrease by 1"
        assert new_list[2]['height'] == 9.0, "the height of rose should decrease by 1"
def test_sample(self):
spark = OrcaContext.get_spark_session()
df = spark.range(1000)
feature_tbl = FeatureTable(df)
total_line_1 = feature_tbl.size()
feature_tbl2 = feature_tbl.sample(0.5)
total_line_2 = feature_tbl2.size()
assert int(total_line_1/2) - 100 < total_line_2 < int(total_line_1/2) + 100, \
"the number of rows should be half"
total_distinct_line = feature_tbl2.distinct().size()
assert total_line_2 == total_distinct_line, "all rows should be distinct"
def test_group_by(self):
file_path = os.path.join(self.resource_path, "parquet/data2.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
groupby_tbl1 = feature_tbl.group_by("col_4", agg={"col_1": ["sum", "count"]})
assert groupby_tbl1.df.filter("col_4 == 'a' and sum(col_1) == 3").count() == 1, \
"the sum of col_1 with col_4 = 'a' should be 3"
assert groupby_tbl1.df.filter("col_4 == 'b' and `count(col_1)` == 5").count() == 1, \
"the count of col_1 with col_4 = 'b' should be 5"
groupby_tbl2 = feature_tbl.group_by(agg={"target": "avg", "col_2": "last"})
assert groupby_tbl2.df.collect()[0]["avg(target)"] == 0.9, \
"the mean of target should be 0.9"
groupby_tbl3 = feature_tbl.group_by("col_5", agg=["max", "min"], join=True)
assert len(groupby_tbl3.df.columns) == len(feature_tbl.df.columns) + 10, \
"groupby_tbl3 should have (#df.columns - #columns)*len(agg)=10 more columns"
assert groupby_tbl3.df.filter("col_5 == 'cc' and `max(col_2)` == 9").count() == \
feature_tbl.df.filter("col_5 == 'cc'").count(), \
"max of col_2 should 9 for all col_5 = 'cc' in groupby_tbl3"
assert groupby_tbl3.df.filter("col_5 == 'aa' and `min(col_3)` == 1.0").count() == \
feature_tbl.df.filter("col_5 == 'aa'").count(), \
"min of col_3 should 1.0 for all col_5 = 'aa' in groupby_tbl3"
groupby_tbl4 = feature_tbl.group_by(["col_4", "col_5"], agg="first", join=True)
assert groupby_tbl4.df.filter("col_4 == 'b' and col_5 == 'dd' and `first(col_1)` == 0") \
.count() == feature_tbl.df.filter("col_4 == 'b' and col_5 == 'dd'").count(), \
"first of col_1 should be 0 for all col_4 = 'b' and col_5 = 'dd' in groupby_tbl4"
def test_append_column(self):
file_path = os.path.join(self.resource_path, "data.csv")
tbl = FeatureTable.read_csv(file_path, header=True)
tbl = tbl.append_column("z", lit(0))
assert tbl.select("z").size() == 4
assert tbl.filter("z == 0").size() == 4
tbl = tbl.append_column("str", lit("a"))
assert tbl.select("str").size() == 4
assert tbl.filter("str == 'a'").size() == 4
tbl = tbl.append_column("float", lit(1.2))
assert tbl.select("float").size() == 4
assert tbl.filter("float == 1.2").size() == 4
def test_ordinal_shuffle(self):
spark = OrcaContext.get_spark_session()
data = [("a", 14), ("b", 25), ("c", 23), ("d", 2), ("e", 1)]
schema = StructType([StructField("name", StringType(), True),
StructField("num", IntegerType(), True)])
tbl = FeatureTable(spark.createDataFrame(data, schema).repartition(1))
shuffled_tbl = tbl.ordinal_shuffle_partition()
rows = tbl.df.collect()
shuffled_rows = shuffled_tbl.df.collect()
rows.sort(key=lambda x: x[1])
shuffled_rows.sort(key=lambda x: x[1])
assert rows == shuffled_rows
def test_write_parquet(self):
file_path = os.path.join(self.resource_path, "parquet/data1.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
feature_tbl.write_parquet("saved.parquet")
loaded_tbl = FeatureTable.read_parquet("saved.parquet")
if os.path.exists("saved.parquet"):
shutil.rmtree("saved.parquet")
def test_read_csv(self):
file_path = os.path.join(self.resource_path, "data.csv")
feature_tbl = FeatureTable.read_csv(file_path, header=True)
assert feature_tbl.size() == 4
columns = feature_tbl.columns
assert columns == ["col1", "col2", "col3"]
records = feature_tbl.df.collect()
assert isinstance(records[0][0], float)
        assert isinstance(records[0][1], str) and isinstance(records[0][2], str)
file_path2 = os.path.join(self.resource_path, "data_no_header.csv")
feature_tbl2 = FeatureTable.read_csv(file_path2, names=["col1", "_col2", "col3"],
dtype={"col1": "int"})
assert feature_tbl2.size() == 4
columns2 = feature_tbl2.columns
assert columns2 == ["col1", "_col2", "col3"]
records2 = feature_tbl2.df.collect()
assert isinstance(records2[0][0], int)
        assert isinstance(records2[0][1], str) and isinstance(records2[0][2], str)
feature_tbl3 = FeatureTable.read_csv(file_path, header=True, dtype=["int", "str", "str"])
records3 = feature_tbl3.df.collect()
assert isinstance(records3[0][0], int)
        assert isinstance(records3[0][1], str) and isinstance(records3[0][2], str)
def test_category_encode_and_one_hot_encode(self):
file_path = os.path.join(self.resource_path, "data.csv")
feature_tbl = FeatureTable.read_csv(file_path, header=True)
feature_tbl, indices = feature_tbl.category_encode(columns=["col2", "col3"])
assert isinstance(indices, list) and len(indices) == 2
assert isinstance(indices[0], StringIndex) and isinstance(indices[1], StringIndex)
assert indices[0].size() == 3 and indices[1].size() == 4
dict1 = indices[0].to_dict()
dict2 = indices[1].to_dict()
records = feature_tbl.df.collect()
assert records[0][1] == dict1["x"] and records[0][2] == dict2["abc"]
assert records[3][1] == dict1["z"] and records[2][2] == dict2["aaa"]
feature_tbl = feature_tbl.one_hot_encode(columns=["col2", "col3"], prefix=["o1", "o2"])
feature_tbl.show()
columns = feature_tbl.columns
assert columns == ["col1", "o1_0", "o1_1", "o1_2", "o1_3", "o2_0",
"o2_1", "o2_2", "o2_3", "o2_4"]
records = feature_tbl.df.collect()
record = records[0]
value1 = dict1["x"]
value2 = dict2["abc"]
for i in range(1, 4):
if i == value1:
assert record[i+1] == 1
else:
assert record[i+1] == 0
for i in range(1, 5):
if i == value2:
assert record[i+5] == 1
else:
assert record[i+5] == 0
def test_split(self):
file_path = os.path.join(self.resource_path, "ncf.csv")
feature_tbl = FeatureTable.read_csv(file_path, header=True, dtype="int")
tbl1, tbl2 = feature_tbl.split([0.8, 0.2], seed=1128)
total_size = feature_tbl.size()
size1 = tbl1.size()
size2 = tbl2.size()
assert size1 + size2 == total_size
def test_target_encode(self):
file_path = os.path.join(self.resource_path, "parquet/data2.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
with self.assertRaises(Exception) as context:
feature_tbl.target_encode("col_4", "target", kfold=-1)
self.assertTrue("kfold should be an integer larger than 0" in str(context.exception))
with self.assertRaises(Exception) as context:
feature_tbl.target_encode("col_4", "col_5")
self.assertTrue("target_cols should be numeric" in str(context.exception))
with self.assertRaises(Exception) as context:
feature_tbl.target_encode("col_4", "target", target_mean={"target": "2"})
self.assertTrue("mean in target_mean should be numeric" in str(context.exception))
with self.assertRaises(Exception) as context:
feature_tbl.target_encode("col_4", "target", kfold=2, fold_col="col_3")
self.assertTrue("fold_col should be integer type" in str(context.exception))
target_tbl1, target_list1 = feature_tbl.target_encode("col_4", "target", kfold=1, smooth=0)
assert len(target_list1) == 1, "len(target_list1) = len(cat_cols) of target_encode"
target_code1 = target_list1[0]
assert isinstance(target_code1, TargetCode), "target_list1 should be list of TargetCode"
assert target_code1.df.filter("col_4 == 'a'").collect()[0]["col_4_te_target"] == \
feature_tbl.df.filter("col_4 == 'a'").agg({"target": "mean"}) \
.collect()[0]["avg(target)"], \
"col_4_te_target should contain mean of target grouped by col_4"
cat_cols = ["col_4", "col_5"]
target_cols = ["col_3", "target"]
fold_col = "fold"
target_mean = {"col_3": 5, "target": 0.5}
out_cols = [[cat_col + target_col for target_col in target_cols] for cat_col in cat_cols]
target_tbl2, target_list2 = feature_tbl.target_encode(
cat_cols,
target_cols,
target_mean=target_mean,
kfold=3,
fold_seed=4,
fold_col=fold_col,
drop_cat=False,
drop_fold=False,
out_cols=out_cols)
assert fold_col in target_tbl2.df.columns, "fold_col should be in target_tbl2"
assert len(target_list2) == len(cat_cols), "len(target_list2) = len(cat_cols)"
for i in range(len(cat_cols)):
assert target_list2[i].cat_col == cat_cols[i], "each element in target_list2 should " \
"correspond to the element in cat_cols"
for out_col in out_cols[i]:
assert out_col in target_list2[i].df.columns, "every out_cols should be one of " \
"the columns in returned TargetCode"
assert target_mean[target_list2[i].out_target_mean[out_col][0]] == \
target_list2[i].out_target_mean[out_col][1], \
"the global mean in TargetCode should be the same as the assigned mean in " \
"target_mean"
target_tbl3, target_list3 = feature_tbl.target_encode([["col_4", "col_5"]], "target",
kfold=2, drop_cat=False)
assert len(target_tbl3.columns) == len(feature_tbl.columns) + 1, \
"target_tbl3 should have one more column col_4_col_5_te_target"
def test_encode_target(self):
file_path = os.path.join(self.resource_path, "parquet/data2.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
spark = OrcaContext.get_spark_session()
data = [("aa", 1.0),
("bb", 2.0),
("cc", 3.0),
("dd", 4.0)]
schema = StructType([StructField("unknown", StringType(), True),
StructField("col_5_te_col_1", DoubleType(), True)])
df0 = spark.createDataFrame(data, schema)
target_code0 = TargetCode(df0,
cat_col="unknown",
out_target_mean={"col_5_te_col_1": ("col_1", 0.5)})
with self.assertRaises(Exception) as context:
feature_tbl.encode_target(target_code0)
self.assertTrue("unknown in TargetCode.cat_col in targets does not exist in Table"
in str(context.exception))
target_code1 = target_code0.rename({"unknown": "col_5"})
target_tbl1 = feature_tbl.encode_target(target_code1)
assert target_tbl1.df.filter("col_5_te_col_1 == 1").count() == \
feature_tbl.df.filter("col_5 == 'aa'").count(), \
"the row with col_5 = 'aa' be encoded as col_5_te_col_1 = 1 in target_tbl1"
assert target_tbl1.df.filter("col_3 == 8.0 and col_4 == 'd'") \
.filter("col_5_te_col_1 == 3"), \
"the row with col_3 = 8.0 and col_4 = 'd' has col_5 = 'cc', " \
"so it should be encoded with col_5_te_col_1 = 3 in target_tbl1"
target_tbl2, target_list2 = feature_tbl.target_encode(
["col_4", "col_5"],
["col_3", "target"],
kfold=2)
target_tbl3 = feature_tbl.encode_target(target_list2, target_cols="target", drop_cat=False)
assert "col_4" in target_tbl3.df.columns, \
"col_4 should exist in target_tbl2 since drop_cat is False"
assert "col_4_te_target" in target_tbl3.df.columns, \
"col_4_te_target should exist in target_tbl2 as encoded column"
assert "col_4_te_col_3" not in target_tbl3.df.columns, \
"col_4_te_col_3 should not exist in target_tbl2 since col_3 is not in target_cols"
def test_difference_lag(self):
file_path = os.path.join(self.resource_path, "parquet/data2.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
with self.assertRaises(Exception) as context:
feature_tbl.difference_lag("col_4", "col_4")
self.assertTrue("columns should be numeric" in str(context.exception))
diff_tbl1 = feature_tbl.difference_lag("col_1", "col_1")
assert diff_tbl1.df.filter("col_1_diff_lag_col_1_1 == 1").count() == 5 and \
diff_tbl1.df.filter("col_1_diff_lag_col_1_1 == 0").count() == 13 and \
diff_tbl1.df.filter("col_1_diff_lag_col_1_1 is null").count() == 2, \
"col_1 has 6 different values and 1 null, so after sorted by col_1, there should" \
" be 5 rows with (lag of col_1) = 1, 2 rows with (lag of col_1) = null," \
" and other rows with (lag of col_1) = 0"
diff_tbl2 = feature_tbl.difference_lag(
["col_1", "col_2"], ["col_3"], shifts=[1, -1],
partition_cols=["col_5"], out_cols=[["c1p1", "c1m1"], ["c2p1", "c2m1"]])
assert diff_tbl2.df.filter("col_3 == 8.0 and col_5 == 'cc'") \
.filter("c1p1 == 1 and c1m1 == 2 and c2p1 == -1 and c2m1 == -4") \
.count() == 1, "the row with col_3 = 8.0 and col_5 = 'cc' should have c1p1 = 1, " \
"c1m1 = 2, c2p1 = -1, c2m1 = -4 after difference_lag"
diff_tbl3 = feature_tbl.difference_lag("col_1", ["col_3"], shifts=[-1],
partition_cols=["col_5"], out_cols="c1m1")
assert diff_tbl3.df.filter("c1m1 == 2").count() == \
diff_tbl2.df.filter("c1m1 == 2").count(), \
"c1m1 should be the same in diff_tbl3 and in diff_tbl2"
diff_tbl4 = feature_tbl.difference_lag("col_1", ["col_3"], shifts=[-1, 1],
partition_cols=["col_5"], out_cols=["c1m1", "c1p1"])
assert diff_tbl4.df.filter("c1p1 == -1").count() == \
diff_tbl2.df.filter("c1p1 == -1").count(), \
"c1p1 should be the same in diff_tbl4 and in diff_tbl2"
def test_cache(self):
file_path = os.path.join(self.resource_path, "parquet/data2.parquet")
feature_tbl = FeatureTable.read_parquet(file_path)
feature_tbl.cache()
        assert feature_tbl.df.is_cached, "the table should be cached after cache()"
        feature_tbl.uncache()
        assert not feature_tbl.df.is_cached, "the table should not be cached after uncache()"
if __name__ == "__main__":
pytest.main([__file__])
| 55.962434
| 100
| 0.574846
|
f48d24676816c53a1b23a2e1a46e79d0a307d885
| 10,228
|
py
|
Python
|
raiden/transfer/mediated_transfer/mediation_fee.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
raiden/transfer/mediated_transfer/mediation_fee.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
raiden/transfer/mediated_transfer/mediation_fee.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
from bisect import bisect, bisect_right
from copy import copy
from dataclasses import dataclass, field
from fractions import Fraction
from typing import List, Optional, Sequence, Tuple, TypeVar, Union
from raiden.exceptions import UndefinedMediationFee
from raiden.transfer.architecture import State
from raiden.utils.typing import (
Balance,
FeeAmount,
PaymentWithFeeAmount,
ProportionalFeeAmount,
TokenAmount,
)
NUM_DISCRETISATION_POINTS = 21
class Interpolate: # pylint: disable=too-few-public-methods
""" Linear interpolation of a function with given points
Based on https://stackoverflow.com/a/7345691/114926
"""
def __init__(
self, x_list: Sequence[Union[Fraction, int]], y_list: Sequence[Union[Fraction, int]]
) -> None:
if any(y - x <= 0 for x, y in zip(x_list, x_list[1:])):
raise ValueError("x_list must be in strictly ascending order!")
self.x_list: List[Fraction] = [Fraction(x) for x in x_list]
self.y_list: List[Fraction] = [Fraction(y) for y in y_list]
intervals = zip(self.x_list, self.x_list[1:], y_list, y_list[1:])
self.slopes: List[Fraction] = [(y2 - y1) / (x2 - x1) for x1, x2, y1, y2 in intervals]
def __call__(self, x: Union[Fraction, int]) -> Fraction:
if not self.x_list[0] <= x <= self.x_list[-1]:
raise ValueError("x out of bounds!")
if x == self.x_list[-1]:
return self.y_list[-1]
i = bisect_right(self.x_list, x) - 1
return self.y_list[i] + self.slopes[i] * (Fraction(x) - self.x_list[i])
def __repr__(self) -> str:
return f"Interpolate({self.x_list}, {self.y_list})"
def sign(x: Union[float, Fraction]) -> int:
""" Sign of input, returns zero on zero input
"""
if x == 0:
return 0
else:
return 1 if x > 0 else -1
def _collect_x_values(
penalty_func_in: Interpolate,
penalty_func_out: Interpolate,
balance_in: Balance,
balance_out: Balance,
max_x: int,
) -> List[Fraction]:
""" Normalizes the x-axis of the penalty functions around the amount of
tokens being transferred.
A penalty function maps the participant's balance to a fee. These functions
are then used to penalize transfers that unbalance the node's channels, and
as a consequence incentivize transfers that re-balance the channels.
    Here the x-axis of the penalty functions is normalized around the current
    channel's capacity, so that instead of computing:
        penalty(current_capacity + amount_being_transferred)
    one can simply compute:
        penalty(amount_being_transferred)
    to find the penalty fee for the current transfer.
"""
all_x_vals = [x - balance_in for x in penalty_func_in.x_list] + [
balance_out - x for x in penalty_func_out.x_list
]
limited_x_vals = (max(min(x, balance_out, max_x), 0) for x in all_x_vals)
return sorted(set(Fraction(x) for x in limited_x_vals))
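# Editor's illustrative sketch (not part of the original module) of the
# normalization described in the docstring above. The penalty curves, balances
# and max_x are invented; candidate x values are clamped to the interval
# [0, min(balance_out, max_x)] and duplicates are removed.
def _collect_x_values_example() -> None:
    penalty_in = Interpolate([0, 50, 100], [10, 0, 10])
    penalty_out = Interpolate([0, 20, 80], [8, 0, 6])
    x_vals = _collect_x_values(
        penalty_func_in=penalty_in,
        penalty_func_out=penalty_out,
        balance_in=Balance(40),
        balance_out=Balance(30),
        max_x=25,  # e.g. the receivable amount
    )
    assert x_vals == [Fraction(0), Fraction(10), Fraction(25)]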
def _cap_fees(
x_list: List[Fraction], y_list: List[Fraction]
) -> Tuple[List[Fraction], List[Fraction]]:
""" Insert extra points for intersections with x-axis, see `test_fee_capping` """
x_list = copy(x_list)
y_list = copy(y_list)
for i in range(len(x_list) - 1):
y1, y2 = y_list[i : i + 2]
if sign(y1) * sign(y2) == -1:
x1, x2 = x_list[i : i + 2]
new_x = x1 + abs(y1) / abs(y2 - y1) * (x2 - x1)
new_index = bisect(x_list, new_x)
x_list.insert(new_index, new_x)
y_list.insert(new_index, Fraction(0))
# Cap points that are below zero
y_list = [max(y, Fraction(0)) for y in y_list]
return x_list, y_list
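# Editor's illustrative sketch (not part of the original module): _cap_fees
# inserts the point where the fee crosses zero and clamps negative fees to
# zero. The two sample points are invented for demonstration.
def _cap_fees_example() -> None:
    x_list, y_list = _cap_fees([Fraction(0), Fraction(10)], [Fraction(-5), Fraction(5)])
    assert x_list == [0, 5, 10]  # an extra point at the zero crossing
    assert y_list == [0, 0, 5]  # negative fees capped at zero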
def _mediation_fee_func(
schedule_in: "FeeScheduleState",
schedule_out: "FeeScheduleState",
balance_in: Balance,
balance_out: Balance,
receivable: TokenAmount,
amount_with_fees: Optional[PaymentWithFeeAmount],
amount_without_fees: Optional[PaymentWithFeeAmount],
cap_fees: bool,
) -> Interpolate:
""" Returns a function which calculates total_mediation_fee(x)
Either `amount_with_fees` or `amount_without_fees` must be given while the
other one is None. The returned function will depend on the value that is
not given.
"""
assert (
amount_with_fees is None or amount_without_fees is None
), "Must be called with either amount_with_fees or amount_without_fees as None"
# If either channel can't transfer even a single token, there can be no mediation.
if balance_out == 0 or receivable == 0:
raise UndefinedMediationFee()
# Add dummy penalty funcs if none are set
if not schedule_in._penalty_func:
schedule_in = copy(schedule_in)
schedule_in._penalty_func = Interpolate([0, balance_in + receivable], [0, 0])
if not schedule_out._penalty_func:
schedule_out = copy(schedule_out)
schedule_out._penalty_func = Interpolate([0, balance_out], [0, 0])
x_list = _collect_x_values(
penalty_func_in=schedule_in._penalty_func,
penalty_func_out=schedule_out._penalty_func,
balance_in=balance_in,
balance_out=balance_out,
max_x=receivable if amount_with_fees is None else balance_out,
)
# Sum up fees where either `amount_with_fees` or `amount_without_fees` is
# fixed and the other one is represented by `x`.
try:
y_list = [
schedule_in.fee(
balance_in, x if amount_with_fees is None else Fraction(amount_with_fees)
)
+ schedule_out.fee(
balance_out, -x if amount_without_fees is None else -Fraction(amount_without_fees)
)
for x in x_list
]
except ValueError:
raise UndefinedMediationFee()
if cap_fees:
x_list, y_list = _cap_fees(x_list, y_list)
return Interpolate(x_list, y_list)
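# Editor's illustrative sketch (not part of the original module) of the calling
# convention described in the docstring above: exactly one of amount_with_fees /
# amount_without_fees is given. FeeScheduleState is defined further below in this
# module, which is fine because the sketch is never executed at import time. With
# default (empty) fee schedules the total mediation fee is zero for any amount.
def _mediation_fee_func_example() -> None:
    schedule = FeeScheduleState()
    forward = _mediation_fee_func(
        schedule_in=schedule,
        schedule_out=schedule,
        balance_in=Balance(100),
        balance_out=Balance(100),
        receivable=TokenAmount(100),
        amount_with_fees=PaymentWithFeeAmount(50),
        amount_without_fees=None,
        cap_fees=True,
    )
    assert forward(50) == Fraction(0)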
T = TypeVar("T", bound="FeeScheduleState")
@dataclass
class FeeScheduleState(State):
# pylint: disable=not-an-iterable
cap_fees: bool = True
flat: FeeAmount = FeeAmount(0)
proportional: ProportionalFeeAmount = ProportionalFeeAmount(0) # as micros, e.g. 1% = 0.01e6
imbalance_penalty: Optional[List[Tuple[TokenAmount, FeeAmount]]] = None
_penalty_func: Optional[Interpolate] = field(init=False, repr=False, default=None)
def __post_init__(self) -> None:
self._update_penalty_func()
def _update_penalty_func(self) -> None:
if self.imbalance_penalty:
assert isinstance(self.imbalance_penalty, list)
x_list, y_list = tuple(zip(*self.imbalance_penalty))
self._penalty_func = Interpolate(x_list, y_list)
def fee(self, balance: Balance, amount: Fraction) -> Fraction:
return (
self.flat
+ Fraction(self.proportional, int(1e6)) * Fraction(abs(amount))
+ (self._penalty_func(balance + amount) - self._penalty_func(balance))
if self._penalty_func
else Fraction(0)
)
@staticmethod
def mediation_fee_func(
schedule_in: "FeeScheduleState",
schedule_out: "FeeScheduleState",
balance_in: Balance,
balance_out: Balance,
receivable: TokenAmount,
amount_with_fees: PaymentWithFeeAmount,
cap_fees: bool,
) -> Interpolate:
""" Returns a function which calculates total_mediation_fee(amount_without_fees) """
return _mediation_fee_func(
schedule_in=schedule_in,
schedule_out=schedule_out,
balance_in=balance_in,
balance_out=balance_out,
receivable=receivable,
amount_with_fees=amount_with_fees,
amount_without_fees=None,
cap_fees=cap_fees,
)
@staticmethod
def mediation_fee_backwards_func(
schedule_in: "FeeScheduleState",
schedule_out: "FeeScheduleState",
balance_in: Balance,
balance_out: Balance,
receivable: TokenAmount,
amount_without_fees: PaymentWithFeeAmount,
cap_fees: bool,
) -> Interpolate:
""" Returns a function which calculates total_mediation_fee(amount_with_fees) """
return _mediation_fee_func(
schedule_in=schedule_in,
schedule_out=schedule_out,
balance_in=balance_in,
balance_out=balance_out,
receivable=receivable,
amount_with_fees=None,
amount_without_fees=amount_without_fees,
cap_fees=cap_fees,
)
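# Editor's worked sketch (not part of the original module) of FeeScheduleState.fee():
# the flat fee, plus the proportional part expressed in micros (10_000 == 1%),
# plus the imbalance-penalty difference. All numbers are invented.
def _fee_schedule_example() -> None:
    schedule = FeeScheduleState(
        flat=FeeAmount(100),
        proportional=ProportionalFeeAmount(10_000),  # 1% in micros
        imbalance_penalty=[(TokenAmount(0), FeeAmount(0)), (TokenAmount(1000), FeeAmount(100))],
    )
    # 100 flat + 1% of 100 + (penalty(600) - penalty(500)) = 100 + 1 + 10
    assert schedule.fee(balance=Balance(500), amount=Fraction(100)) == Fraction(111)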
def linspace(start: TokenAmount, stop: TokenAmount, num: int) -> List[TokenAmount]:
""" Returns a list of num numbers from start to stop (inclusive). """
assert num > 1
assert start <= stop
step = (stop - start) / (num - 1)
result = []
for i in range(num):
result.append(TokenAmount(start + round(i * step)))
return result
def calculate_imbalance_fees(
channel_capacity: TokenAmount, proportional_imbalance_fee: ProportionalFeeAmount
) -> Optional[List[Tuple[TokenAmount, FeeAmount]]]:
""" Calculates a U-shaped imbalance curve
The penalty term takes the following value at the extrema:
channel_capacity * (proportional_imbalance_fee / 1_000_000)
"""
assert channel_capacity >= 0
assert proportional_imbalance_fee >= 0
if proportional_imbalance_fee == 0:
return None
if channel_capacity == 0:
return None
MAXIMUM_SLOPE = 0.1
max_imbalance_fee = channel_capacity * proportional_imbalance_fee / 1e6
assert proportional_imbalance_fee / 1e6 <= MAXIMUM_SLOPE / 2, "Too high imbalance fee"
# calculate function parameters
s = MAXIMUM_SLOPE
c = max_imbalance_fee
o = channel_capacity / 2
b = s * o / c
b = min(b, 10) # limit exponent to keep numerical stability
a = c / o ** b
def f(x: TokenAmount) -> FeeAmount:
return FeeAmount(int(round(a * abs(x - o) ** b)))
# calculate discrete function points
num_base_points = min(NUM_DISCRETISATION_POINTS, channel_capacity + 1)
x_values = linspace(TokenAmount(0), channel_capacity, num_base_points)
y_values = [f(x) for x in x_values]
return list(zip(x_values, y_values))
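# Editor's illustrative sketch (not part of the original module) of the U-shaped
# imbalance curve. For a hypothetical 1000-token channel with a 0.3% imbalance
# fee (3000 in micros), the penalty is maximal at the empty/full extremes and
# zero when the channel is perfectly balanced.
def _imbalance_curve_example() -> None:
    points = calculate_imbalance_fees(
        channel_capacity=TokenAmount(1000),
        proportional_imbalance_fee=ProportionalFeeAmount(3000),
    )
    assert points is not None
    assert points[0] == (0, 3)  # empty channel: 1000 * 3000 / 1e6 = 3
    assert points[10] == (500, 0)  # balanced channel: no penalty
    assert points[-1] == (1000, 3)  # full channel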
| 34.207358
| 98
| 0.663668
|
ad407364429060d5c3616fb9e42f221a150f5d0d
| 238
|
py
|
Python
|
scope.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
scope.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
scope.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
global_var = 1
def my_vars() :
print( 'Global Variable:' , global_var )
local_var = 2
print( 'Local Variable:' , local_var )
global inner_var
inner_var = 3
my_vars()
print( 'Coerced Global:' , inner_var )
| 13.222222
| 42
| 0.613445
|
cc893b5a3a2102a3681e5b53db186a18f81a9dc1
| 890
|
py
|
Python
|
Visualization/rendering_categorical_maps.py
|
liuxb555/earthengine-py-examples
|
cff5d154b15a17d6a241e3c003b7fc9a2c5903f3
|
[
"MIT"
] | 75
|
2020-06-09T14:40:11.000Z
|
2022-03-07T08:38:10.000Z
|
Visualization/rendering_categorical_maps.py
|
gentaprekazi/earthengine-py-examples
|
76ae8e071a71b343f5e464077afa5b0ed2f9314c
|
[
"MIT"
] | 1
|
2022-03-15T02:23:45.000Z
|
2022-03-15T02:23:45.000Z
|
Visualization/rendering_categorical_maps.py
|
gentaprekazi/earthengine-py-examples
|
76ae8e071a71b343f5e464077afa5b0ed2f9314c
|
[
"MIT"
] | 35
|
2020-06-12T23:23:48.000Z
|
2021-11-15T17:34:50.000Z
|
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load 2012 MODIS land cover and select the IGBP classification.
cover = ee.Image('MODIS/051/MCD12Q1/2012_01_01').select('Land_Cover_Type_1')
# Define a palette for the 18 distinct land cover classes.
igbpPalette = [
'aec3d4', # water
'152106', '225129', '369b47', '30eb5b', '387242', # forest
'6a2325', 'c3aa69', 'b76031', 'd9903d', '91af40', # shrub, grass
'111149', # wetlands
'cdb33b', # croplands
'cc0013', # urban
'33280d', # crop mosaic
'd7cdcc', # snow and ice
'f7e084', # barren
'6f6f6f' # tundra
]
# Specify the min and max labels and the color palette matching the labels.
Map.setCenter(-99.229, 40.413, 5)
Map.addLayer(cover,
{'min': 0, 'max': 17, 'palette': igbpPalette},
'IGBP classification')
# Display the map.
Map
| 28.709677
| 76
| 0.658427
|
a1ebc1841bb1d7d777d6556f7cfdf8151e4c0e9a
| 2,285
|
py
|
Python
|
xcenternet/datasets/ximilar_dataset.py
|
wanghh2000/xcenternet
|
458d43e01e96adf864809d9a15c756302ec75cd7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xcenternet/datasets/ximilar_dataset.py
|
wanghh2000/xcenternet
|
458d43e01e96adf864809d9a15c756302ec75cd7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xcenternet/datasets/ximilar_dataset.py
|
wanghh2000/xcenternet
|
458d43e01e96adf864809d9a15c756302ec75cd7
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import random
import os
import tensorflow as tf
from xcenternet.datasets.dataset import Dataset
from ximilar.client.utils.json_data import read_json_file_list
class XimilarDataset(Dataset):
def __init__(self, dataset_path, init_lr):
labels = read_json_file_list(os.path.join(dataset_path, "labels.json"))
self.labels = {label["id"]: key for key, label in enumerate(labels)}
records = read_json_file_list(os.path.join(dataset_path, "images.json"))
# TODO random split might not distribute labels correctly
random.seed(2020)
random.shuffle(records)
train_num = int(len(records) * 0.8)
self.records_train = records[:train_num]
self.records_validation = records[train_num:]
super().__init__(len(self.labels), init_lr)
@tf.function
def _load_image(self, img_path):
image_encoded = tf.io.read_file(img_path)
image = tf.image.decode_jpeg(image_encoded)
return image
def _preprocess_record(self, records):
files, labels, bboxes = [], [], []
for rec in records:
b = [obj["data"] for obj in rec["objects"]]
b = [[bbox[1], bbox[0], bbox[3], bbox[2]] for bbox in b]
files.append(rec["_file"])
labels.append([self.labels[obj["detection_label"]] for obj in rec["objects"]])
bboxes.append(b)
data = (files, tf.ragged.constant(labels), tf.ragged.constant(bboxes))
return tf.data.Dataset.from_tensor_slices(data)
def load_train_datasets(self):
return self._preprocess_record(self.records_train), len(self.records_train)
def load_validation_datasets(self):
return self._preprocess_record(self.records_validation), len(self.records_validation)
def decode(self, image_path, labels, bboxes):
image = self._load_image(image_path)
h, w = tf.shape(image)[0], tf.shape(image)[1]
bboxes = bboxes.to_tensor()
bboxes /= tf.stack([h, w, h, w])
return image, labels, bboxes
def scheduler(self, epoch):
if epoch < 40:
return self.initial_learning_rate
elif epoch < 80:
return self.initial_learning_rate * 0.1
else:
return self.initial_learning_rate * 0.01
| 34.621212
| 93
| 0.650766
|
ec5a7a858a6de678092d5ec54cbe8c20f1869729
| 2,713
|
py
|
Python
|
tests/types/test_interfaces.py
|
ernestoarbitrio/strawberry
|
772703c7c47173eeb9e4e8d4c43fccd28166d520
|
[
"MIT"
] | null | null | null |
tests/types/test_interfaces.py
|
ernestoarbitrio/strawberry
|
772703c7c47173eeb9e4e8d4c43fccd28166d520
|
[
"MIT"
] | 1
|
2021-01-18T18:58:45.000Z
|
2021-01-18T19:39:09.000Z
|
tests/types/test_interfaces.py
|
ernestoarbitrio/strawberry
|
772703c7c47173eeb9e4e8d4c43fccd28166d520
|
[
"MIT"
] | null | null | null |
import strawberry
def test_defining_interface():
@strawberry.interface
class Node:
id: strawberry.ID
definition = Node._type_definition
assert definition.name == "Node"
assert len(definition.fields) == 1
assert definition.fields[0].name == "id"
assert definition.fields[0].type == strawberry.ID
assert definition.is_interface
def test_implementing_interfaces():
@strawberry.interface
class Node:
id: strawberry.ID
@strawberry.type
class User(Node):
name: str
definition = User._type_definition
assert definition.name == "User"
assert len(definition.fields) == 2
assert definition.fields[0].name == "id"
assert definition.fields[0].type == strawberry.ID
assert definition.fields[1].name == "name"
assert definition.fields[1].type == str
assert definition.is_interface is False
assert definition.interfaces == [Node._type_definition]
def test_implementing_interface_twice():
@strawberry.interface
class Node:
id: strawberry.ID
@strawberry.type
class User(Node):
name: str
@strawberry.type
class Person(Node):
name: str
definition = User._type_definition
assert definition.name == "User"
assert len(definition.fields) == 2
assert definition.fields[0].name == "id"
assert definition.fields[0].type == strawberry.ID
assert definition.fields[1].name == "name"
assert definition.fields[1].type == str
assert definition.is_interface is False
assert definition.interfaces == [Node._type_definition]
definition = Person._type_definition
assert definition.name == "Person"
assert len(definition.fields) == 2
assert definition.fields[0].name == "id"
assert definition.fields[0].type == strawberry.ID
assert definition.fields[1].name == "name"
assert definition.fields[1].type == str
assert definition.is_interface is False
assert definition.interfaces == [Node._type_definition]
def test_interfaces_can_implement_other_interfaces():
@strawberry.interface
class Node:
id: strawberry.ID
@strawberry.interface
class UserNodeInterface(Node):
id: strawberry.ID
name: str
@strawberry.type
class Person(UserNodeInterface):
id: strawberry.ID
name: str
assert UserNodeInterface._type_definition.is_interface is True
assert UserNodeInterface._type_definition.interfaces == [Node._type_definition]
definition = Person._type_definition
assert definition.is_interface is False
assert definition.interfaces == [
UserNodeInterface._type_definition,
Node._type_definition,
]
| 24.663636
| 83
| 0.693328
|
64931123f69114f98a410d154434d1afae978604
| 561
|
py
|
Python
|
app/db/models/users.py
|
JvitorS23/jobboard_fastAPI
|
5abcc69f19417ad99352c0434db96407e2d7da76
|
[
"MIT"
] | 1
|
2021-10-01T16:40:33.000Z
|
2021-10-01T16:40:33.000Z
|
app/db/models/users.py
|
JvitorS23/jobboard_fastAPI
|
5abcc69f19417ad99352c0434db96407e2d7da76
|
[
"MIT"
] | null | null | null |
app/db/models/users.py
|
JvitorS23/jobboard_fastAPI
|
5abcc69f19417ad99352c0434db96407e2d7da76
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship
from db.base_class import Base
class User(Base):
"""Users model"""
id = Column(Integer, primary_key=True, index=True)
username = Column(String, unique=True, nullable=False)
email = Column(String, nullable=False, unique=True, index=True)
password = Column(String, nullable=False)
is_active = Column(Boolean, default=True)
is_superuser = Column(Boolean, default=False)
jobs = relationship('Job', back_populates='owner')
| 35.0625
| 67
| 0.734403
|
8228f83a7623c33c0ae02ed77c486b687e4848d7
| 1,879
|
py
|
Python
|
python/metaparticle_pkg/runner/docker_runner.py
|
bwoodhouse322/package
|
c9ffaece59637689ed48607c157227818ea7f60f
|
[
"MIT"
] | 512
|
2017-10-05T06:19:10.000Z
|
2022-01-09T23:04:28.000Z
|
python/metaparticle_pkg/runner/docker_runner.py
|
bwoodhouse322/package
|
c9ffaece59637689ed48607c157227818ea7f60f
|
[
"MIT"
] | 116
|
2017-12-05T16:14:26.000Z
|
2020-08-25T03:39:57.000Z
|
python/metaparticle_pkg/runner/docker_runner.py
|
bwoodhouse322/package
|
c9ffaece59637689ed48607c157227818ea7f60f
|
[
"MIT"
] | 73
|
2017-12-07T00:00:36.000Z
|
2022-02-25T20:06:29.000Z
|
from __future__ import absolute_import
import logging
from docker import APIClient
# use a generic logger name: metaparticle_pkg.runner
logger = logging.getLogger('.'.join(__name__.split('.')[:-1]))
class DockerRunner:
def __init__(self):
self.docker_client = None
def run(self, img, name, options):
if self.docker_client is None:
self.docker_client = APIClient(version='auto')
ports = []
host_config = None
# Prepare port configuration
if options.ports is not None and len(options.ports) > 0:
for port_number in options.ports:
ports.append(port_number)
host_config = self.docker_client.create_host_config(
port_bindings={p: p for p in ports}
)
# Launch docker container
container = self.docker_client.create_container(
img,
host_config=host_config,
name=name,
ports=ports
)
logger.info('Starting container {}'.format(container))
self.docker_client.start(container=container.get('Id'))
self.container = container
def logs(self, *args, **kwargs):
if self.docker_client is None:
self.docker_client = APIClient(version='auto')
# seems like we are hitting bug
# https://github.com/docker/docker-py/issues/300
log_stream = self.docker_client.logs(
self.container.get('Id'),
stream=True,
follow=True
)
for line in log_stream:
logger.info(line.decode("utf-8").strip('\n'))
def cancel(self, name):
if self.docker_client is None:
self.docker_client = APIClient(version='auto')
self.docker_client.kill(self.container.get('Id'))
self.docker_client.remove_container(self.container.get('Id'))
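# Editor's hedged usage sketch, not part of the original
# metaparticle package. `_Options` is a hypothetical stand-in for
# the options object metaparticle normally passes in;
# DockerRunner.run() only reads its `ports` attribute. Running it
# requires a local Docker daemon and a locally available image.
if __name__ == '__main__':
    class _Options:
        ports = [8080]
    runner = DockerRunner()
    runner.run('nginx:latest', 'metaparticle-demo', _Options())
    # runner.logs() would stream container output until it exits.
    runner.cancel('metaparticle-demo')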
| 29.825397
| 69
| 0.611495
|
6527b0ed25191dbf47d2fef3f6730e0f11a350d5
| 4,991
|
py
|
Python
|
plonk/simulation/evolution.py
|
matthewturk/plonk
|
a2cfb8ebb858cff8fdfdfeb0be1fd672b9022ed1
|
[
"MIT"
] | 1
|
2019-09-10T16:22:52.000Z
|
2019-09-10T16:22:52.000Z
|
plonk/simulation/evolution.py
|
benedettaveronesi/plonk
|
2403e41adfaa74688404f592462e011fa9f74516
|
[
"MIT"
] | null | null | null |
plonk/simulation/evolution.py
|
benedettaveronesi/plonk
|
2403e41adfaa74688404f592462e011fa9f74516
|
[
"MIT"
] | null | null | null |
"""Evolution class for global quantites.
This module contains the Evolution class for tracking global quantities
and sink particle time series data. These files track averaged
quantities that are more frequently output than snapshot files.
"""
from __future__ import annotations
from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from pandas import DataFrame
class Evolution:
"""Smoothed particle hydrodynamics simulation time series object.
Evolution files track global quantities, such as energy, momentum,
    and density, over time. The time increments in these files are
smaller than the snapshot file output time. These files are
typically stored as text files.
The data is stored as a pandas DataFrame.
Examples
--------
Reading a single evolution file into an Evolution object.
>>> file_name = 'simulation.ev'
>>> ev = plonk.load_ev(file_name)
Reading a collection of evolution files into an Evolution object.
>>> file_names = ('sim01.ev', 'sim02.ev', 'sim03.ev')
>>> ev = plonk.load_ev(file_names)
Accessing the data as a pandas DataFrame or Series.
>>> df = ev.data
>>> time = ev.data['time']
Plotting kinetic and thermal energy against time using pandas.
>>> ev.plot('time', ['ekin', 'etherm'])
"""
def __init__(self):
self.file_paths: Tuple[Path, ...]
self.file_names: Tuple[str, ...]
def load_from_file(
self,
filenames: Union[str, Path, Tuple[str], Tuple[Path], List[str], List[Path]],
) -> Evolution:
"""Load from file(s).
Parameters
----------
filename(s)
Collection of paths to evolution file(s) in chronological order.
These should all contain the same columns.
"""
if isinstance(filenames, (str, Path)):
_filenames = [filenames]
elif isinstance(filenames, (list, tuple)):
_filenames = list(filenames)
else:
raise ValueError('filenames is not a known type')
_file_paths = list()
_file_names = list()
for filename in _filenames:
path = Path(filename)
_file_paths.append(path.resolve())
_file_names.append(path.name)
self.file_paths = tuple(_file_paths)
self.file_names = tuple(_file_names)
_check_file_consistency(self.file_paths)
self._columns = _get_columns(self.file_paths[0])
self._data = self._get_data()
return self
@property
def columns(self) -> Tuple[str, ...]:
"""List of available time evolution data."""
return self._columns
@property
def data(self) -> DataFrame:
"""Time evolution data as a pandas DataFrame."""
return self._data
def plot(self, *args, **kwargs):
"""Plot using pandas."""
return self.data.plot(*args, **kwargs)
def _get_data(self) -> DataFrame:
times = list()
for filename in self.file_paths:
times.append(np.loadtxt(filename, usecols=0))
_skiprows = [0]
if len(times) > 1:
for t1, t2 in zip(times, times[1:]):
_skiprows.append(np.where(t2 < t1[-1])[0][-1] + 2)
df = pd.concat(
(
pd.read_csv(
f,
names=self._columns,
skiprows=skiprows,
skipinitialspace=True,
delim_whitespace=True,
comment='#',
)
for f, skiprows in zip(self.file_paths, _skiprows)
)
)
df.reset_index(inplace=True, drop=True)
return df
def __repr__(self):
"""Dunder repr method."""
return self.__str__()
def __str__(self):
"""Dunder str method."""
return f'<plonk.Evolution: "{self.file_names}">'
def __len__(self):
"""Dunder len method."""
return len(self.data)
def _get_columns(filename: Path) -> Tuple[str, ...]:
with open(filename) as f:
column_line = f.readline().strip('\n')
_column_line = [item.strip('] ')[2:].strip(' ') for item in column_line.split('[')]
return tuple(_column_line[1:])
def _check_file_consistency(filenames: Tuple[Path, ...]) -> None:
columns = _get_columns(filenames[0])
for filename in filenames:
columns_previous = columns
columns = _get_columns(filename)
if columns != columns_previous:
raise ValueError('files have different columns')
def load_ev(
filenames: Union[str, Path, Tuple[str], Tuple[Path], List[str], List[Path]],
) -> Evolution:
"""Load Evolution from file(s).
Parameters
----------
filename(s)
Collection of paths to evolution file(s) in chronological order.
These should all contain the same columns.
"""
return Evolution().load_from_file(filenames)
| 28.19774
| 87
| 0.602084
|
ad192cb5e3ac0daa0885231e4d6fa50e43873aeb
| 381
|
py
|
Python
|
fides/__init__.py
|
fides-dev/fides
|
f481387a3e01a1480c556a826cbdaa9a99636c34
|
[
"BSD-3-Clause"
] | 7
|
2020-11-12T19:45:14.000Z
|
2021-12-16T19:17:44.000Z
|
fides/__init__.py
|
fides-dev/fides
|
f481387a3e01a1480c556a826cbdaa9a99636c34
|
[
"BSD-3-Clause"
] | 44
|
2020-11-12T18:13:06.000Z
|
2022-02-10T17:15:14.000Z
|
fides/__init__.py
|
fides-dev/fides
|
f481387a3e01a1480c556a826cbdaa9a99636c34
|
[
"BSD-3-Clause"
] | 2
|
2020-12-03T11:11:11.000Z
|
2021-04-02T13:25:09.000Z
|
"""
Fides
-----------
Fides is an interior trust-region reflective optimizer
"""
# flake8: noqa
from .minimize import Optimizer
from .hessian_approximation import (
SR1, BFGS, DFP, FX, HybridFixed, GNSBFGS, BB, BG, Broyden, SSM, TSSM
)
from .logging import create_logger
from .version import __version__
from .constants import Options, SubSpaceDim, StepBackStrategy, ExitFlag
| 25.4
| 72
| 0.753281
|
3f01a0eb7b0c9450b6538c444b2252788996b8fa
| 870
|
py
|
Python
|
manpage-builder/conf.py
|
domdfcoding/rsc-on-this-day
|
4669b2608f93394e2c53fb163ea5b6c6e0e3fd7d
|
[
"MIT"
] | null | null | null |
manpage-builder/conf.py
|
domdfcoding/rsc-on-this-day
|
4669b2608f93394e2c53fb163ea5b6c6e0e3fd7d
|
[
"MIT"
] | 23
|
2020-11-16T23:47:27.000Z
|
2022-01-16T01:24:45.000Z
|
manpage-builder/conf.py
|
domdfcoding/rsc-on-this-day
|
4669b2608f93394e2c53fb163ea5b6c6e0e3fd7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# stdlib
import os
import re
import sys
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(".."))
# this package
from __pkginfo__ import __author__, __copyright__, __version__, modname
project = modname
slug = re.sub(r'\W+', '-', modname.lower())
release = __version__
author = __author__
language = "en"
source_suffix = ".rst"
exclude_patterns = []
suppress_warnings = ["image.nonlocal_uri"]
pygments_style = "default"
version = f"{modname} {__version__}"
copyright = (
f"{__copyright__}. License GPLv3+: GNU GPL version 3 or later "
f"<http://gnu.org/licenses/gpl.html>\n\n"
"This is free software: you are free to change and redistribute it under certain conditions.\n\n"
"There is NO WARRANTY, to the extent permitted by law."
)
master_doc = "manpage"
man_pages = [("manpage", slug, modname, [author], 1)]
| 24.166667
| 99
| 0.712644
|
bfa15b3f796454b0ad89206e00ad83075959f38f
| 2,442
|
py
|
Python
|
examples/adspygoogle/dfp/v201208/update_third_party_slot.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/update_third_party_slot.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
examples/adspygoogle/dfp/v201208/update_third_party_slot.py
|
krux/adspygoogle
|
6505a71122f45fe3e675f27f2c29f67a1768069b
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-04-02T19:00:31.000Z
|
2020-08-06T03:28:38.000Z
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the first third party slot's description."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
third_party_slot_service = client.GetService(
'ThirdPartySlotService', version='v201208')
# Create statement object to only select third party slots that are active.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
filter_statement = {'query': 'WHERE status = :status LIMIT 1',
'values': values}
# Get a third party slot by statement.
response = third_party_slot_service.GetThirdPartySlotsByStatement(
filter_statement)[0]
third_party_slots = []
if 'results' in response:
third_party_slots = response['results']
if third_party_slots:
# Update the local third party slot object by changing the description.
third_party_slot = third_party_slots[0]
third_party_slot['description'] = 'Updated description.'
# Update third party slots remotely.
third_party_slot = third_party_slot_service.UpdateThirdPartySlot(
third_party_slot)[0]
# Display results.
if third_party_slot:
print ('A third party slot with id \'%s\' and description \'%s\' was '
'updated.' % (third_party_slot['id'],
third_party_slot['description']))
else:
print 'No third party slots were updated.'
else:
print 'No third party slots found to update.'
| 33
| 80
| 0.708436
|
0de790684aac5e5a64aefc7bc9e7bb045df15b6c
| 1,083
|
py
|
Python
|
returns/primitives/types.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
returns/primitives/types.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
returns/primitives/types.py
|
ksurta/returns
|
9746e569303f214d035462ae3dffe5c49abdcfa7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Any, NoReturn
from returns.primitives.exceptions import ImmutableStateError
class Immutable(object):
"""
Helper type for objects that should be immutable.
When applied, each instance becomes immutable.
Nothing can be added or deleted from it.
.. code:: python
>>> from returns.primitives.types import Immutable
>>> class MyModel(Immutable):
... ...
...
>>> model = MyModel()
.. code::
>>> model.prop = 1
Traceback (most recent call last):
...
returns.primitives.exceptions.ImmutableStateError
See :class:`returns.primitives.container.BaseContainer` for examples.
"""
def __setattr__(self, attr_name: str, attr_value: Any) -> NoReturn:
"""Makes inner state of the containers immutable for modification."""
raise ImmutableStateError()
def __delattr__(self, attr_name: str) -> NoReturn: # noqa: WPS603
"""Makes inner state of the containers immutable for deletion."""
raise ImmutableStateError()
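A minimal usage sketch, not taken from the returns repository: a subclass can still perform its one-time initial assignments by bypassing its own __setattr__ with object.__setattr__. The Point class and its x/y attributes below are hypothetical names chosen only for illustration.
from returns.primitives.types import Immutable

class Point(Immutable):
    """Hypothetical immutable value object (illustration only)."""

    def __init__(self, x: int, y: int) -> None:
        # object.__setattr__ bypasses Immutable.__setattr__ for the initial state.
        object.__setattr__(self, 'x', x)
        object.__setattr__(self, 'y', y)

point = Point(1, 2)
print(point.x, point.y)   # 1 2
# point.x = 3             # would raise ImmutableStateError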
| 26.414634
| 77
| 0.65097
|
8f9489601bb7707826675e276fbf1cdfccc5a057
| 40,692
|
py
|
Python
|
plotly/vis.py
|
NREL/MetMastVis
|
0c3dd87540471c061eb491c871fdb32e6dabd31b
|
[
"Apache-2.0"
] | 1
|
2018-05-25T20:03:48.000Z
|
2018-05-25T20:03:48.000Z
|
plotly/vis.py
|
nhamilto/MetMast
|
38475682adb21081c86c58e9008a278971306c23
|
[
"Apache-2.0"
] | null | null | null |
plotly/vis.py
|
nhamilto/MetMast
|
38475682adb21081c86c58e9008a278971306c23
|
[
"Apache-2.0"
] | 2
|
2018-06-07T20:00:03.000Z
|
2020-11-26T21:52:04.000Z
|
"""
:module: vis
:platform: Unix, Windows
:synopsis: Visualization library for the Met Mast data; it is specifically designed to handle MetDat objects from the "met_funcs.py" library.
:moduleauthor: Nicholas Hamilton <Nicholas.Hamilton@nrel.gov> Rafael Mudafort <Rafael.Mudafort@nrel.gov> Lucas McCullum <Lucas.McCullum@nrel.gov>
"""
###########################################
# Visualization
###########################################
import utils
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from colour import Color
from windrose import WindroseAxes
import pandas as pd
plt.rc('font', family='serif')
plt.rc('font', size=12)
plt.rc('facecolor')
def cumulative_profile(metdat, catinfo, category=None):
"""**Get Variable Profile**.
Plot the vertical profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
# extract vertical locations of data from variable names
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
plotdat = metdat[colnames].mean()
fig, ax = plt.subplots(figsize=(3.5,5))
ax.plot(plotdat, vertlocs)
ax.set_ylabel('Probe Height [m]')
ax.set_xlabel(catinfo['labels'][category])
fig.tight_layout()
return fig, ax
def monthly_profile(metdat, catinfo, category=None, basecolor='cycle'):
"""**Get Monthly Profile**.
Plot the monthly profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
months = utils.monthnames()
colors = utils.get_colors(len(months), basecolor=basecolor)
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
plotdat = metdat[colnames].groupby(metdat.index.month).mean()
fig, ax = plt.subplots(figsize=(3.5,5), sharex=True, sharey=True)
for iax in range(len(months)):
ax.plot(plotdat.xs(iax+1), vertlocs, color=colors[iax])
leg = ax.legend(months, loc=7, bbox_to_anchor=(1.75, 0.5), edgecolor='w')
ax.set_ylabel('Probe Height [m]')
ax.set_xlabel(catinfo['labels'][category])
fig.tight_layout()
return fig, ax
def stability_profile(metdat, catinfo, category=None, vertloc=80, basecolor='cycle'):
"""**Get Stability Profile**.
Plot the stability profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
stab, stabloc, ind = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
colors = utils.get_colors(5,basecolor=basecolor)
stabconds = utils.get_stabconds()
plotdat = metdat.groupby(stab).mean()
pdat = plotdat[catinfo['columns'][category]].get_values()
# Extract vertical locations of data from variable names
_, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
fig, ax = plt.subplots(figsize=(3.5,5))
for ii, cond in enumerate(stabconds):
ax.plot(pdat[ii,ind], vertlocs, color=colors[ii])
ax.set_ylabel('Probe Height [m]')
ax.set_xlabel(catinfo['labels'][category])
fig.legend(stabconds, loc=6, bbox_to_anchor=(1,0.5), frameon=False)
fig.tight_layout()
return fig, ax
def monthly_stability_profiles(metdat, catinfo, category=None, vertloc=80, basecolor='span'):
"""**Get Monthly Stability Profile**.
Plot the monthly stability profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
stab, stabloc, ind = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
plotdat = metdat.groupby([metdat.index.month, stab])
colors = utils.get_colors(5,basecolor='span')
months = utils.monthnames()
stabconds = utils.get_stabconds()
# extract vertical locations of data from variable names
_, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
fig, ax = plt.subplots(4,3, figsize=(8,13), sharex=True, sharey=True)
for iax, month in enumerate(months):
for ii, cond in enumerate(stabconds):
pdat = plotdat[catinfo['columns'][category]].get_group((iax+1, cond)).mean()
ax.flatten()[iax].plot(pdat[ind], vertlocs, color=colors[ii])
ax.flatten()[iax].set_title(month)
fig.text(0,0.58, 'Probe Height [m]', ha='center', va='center', fontsize=14, rotation='vertical')
leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.12), frameon=False)
fig.tight_layout()
fig.subplots_adjust(bottom=0.175)
fig.text(0.525,0.135, catinfo['labels'][category], ha='center', va='center', fontsize=14)
return fig, ax
def hourlyplot(metdat, catinfo, category=None, basecolor='span'):
"""**Get Hourly Averaged Profile**.
Plot the hourly averaged profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. basecolor (string): Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
colors = utils.get_colors(len(catinfo['columns'][category]), basecolor=basecolor, reverse=True)
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category], reverse=True)
plotdat = metdat[colnames].groupby(metdat.index.hour).mean()
fig, ax = plt.subplots(figsize=(5,3.5), sharex=True, sharey=True)
for iax in range(len(colnames)):
ax.plot(plotdat[colnames[iax]], color=colors[iax])
leg = ax.legend([str(v) + ' m' for v in vertlocs], loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
ax.set_xlabel('Time [hour]')
ax.set_ylabel(catinfo['labels'][category])
fig.tight_layout()
return fig, ax
def monthlyhourlyplot(metdat, catinfo, category=None, basecolor='span'):
"""**Get Monthly Hourly Averaged Profile**.
Plot the monthly hourly averaged profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
months = utils.monthnames()
colors = utils.get_colors(len(catinfo['columns'][category]), basecolor=basecolor, reverse=True)
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category], reverse=True)
plotdat = metdat[colnames].groupby([metdat.index.month.rename('month'), metdat.index.hour.rename('hour')]).mean()
fig, ax = plt.subplots(4,3, figsize=(9,11), sharex=True, sharey=True)
for iax in range(len(months)):
for catitem in range(len(colnames)):
ax.flatten()[iax].plot(plotdat[colnames[catitem]].xs(iax+1), color=colors[catitem])
ax.flatten()[iax].set_title(months[iax], fontsize=12)
fig.text(0.5,0.2, 'Time of Day [hour]', ha='center', va='center')
leg = fig.legend([str(v) + ' m' for v in vertlocs], loc = 'upper center', bbox_to_anchor = (0,-0.825,1,1), bbox_transform = plt.gcf().transFigure, frameon=False, ncol=2)
fig.tight_layout()
fig.subplots_adjust(bottom=0.25)
fig.text(0,0.6125, catinfo['labels'][category], ha='center', va='center', rotation='vertical')
return fig, ax
def rose_fig(metdat, catinfo, category=None, vertloc=80, bins=6, nsector=36, ylim=None, noleg=False):
"""**Get Wind Rose Figure**.
Plot the wind rose of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. bins (integer, list) [default: 6]: Indicates the number of equally spaced bins to divide the variable.
6. nsector (integer) [default: 36]: Indicates the number of sector directions to divide the rose figure.
7. ylim (float) [default: None]: Provides the maximum value for the frequency of observations and is used to plot different roses with uniform limits.
8. noleg (Boolean) [default: False]: Determines whether or not there will be a legend to the figure.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
3. leg (Matplotlib Legend): The legend object for the desired input data and categories.
"""
# set up data
dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
winddir = metdat[dircol]
var = metdat[varcol]
# get var divisions set up
if isinstance(bins, int):
nbins = bins
else:
nbins = len(bins)
# set up plotting colors
colors = utils.get_colors(nbins-1, basecolor='span')
colors += ['#3A4246'] # add something dark to the end.
colors = tuple(colors[0:nbins])
# build the figure
fig = plt.figure()
ax = WindroseAxes.from_ax(fig=fig)
ax.bar(winddir, var, normed=True, opening=0.95, edgecolor='white', bins=bins, nsector=nsector,colors=colors, linewidth=0.35)
# legend
leg=['blank']
if noleg is not True:
leg = ax.set_legend(loc=7,bbox_to_anchor=(1.55,0.5), fontsize=10, frameon=False)
# add labels to legend
leg.set_title(catinfo['labels'][category])
fig.text(0.875, 0.275, r'$z={}$ m'.format(vertloc))
# adjust plot for specified max frequency
if ylim is None:
ylim = ax.get_ylim()[-1]
# frequency axis limits and labels
ax.set_ylim(0,ylim)
ax.set_yticks(np.linspace(0,ylim,4))
ax.set_yticklabels([str(round(x,1)) for x in np.linspace(0,ylim,4)])
return fig, ax, leg
def monthly_rose_fig(metdat, catinfo, category=None, vertloc=80, bins=6, nsector=36, ylim=None, noleg=False):
"""**Get Monthly Wind Rose Figure**.
Plot the monthly wind rose of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. bins (integer, list) [default: 6]: Indicates the number of equally spaced bins to divide the variable.
6. nsector (integer) [default: 36]: Indicates the number of sector directions to divide the rose figure.
7. ylim (float) [default: None]: Provides the maximum value for the frequency of observations and is used to plot different roses with uniform limits.
8. noleg (Boolean) [default: False]: Determines whether or not there will be a legend to the figure.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
3. leg (Matplotlib Legend): The legend object for the desired input data and categories.
"""
# set up data
dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
plotdat = metdat.groupby(metdat.index.month)
winddir = plotdat[dircol]
var = plotdat[varcol]
months = utils.monthnames()
# wind speed bins to use in wind roses
# get var divisions set up
if isinstance(bins, int):
nbins = bins
else:
nbins = len(bins)
# set up plotting colors
colors = utils.get_colors(nbins-1, basecolor='span')
colors += ['#3A4246'] # add something dark to the end.
colors = tuple(colors[0:nbins])
fig = plt.figure(figsize=(9,13))
for iax,month in enumerate(months):
ax = fig.add_subplot(4,3,iax+1, projection="windrose")
ax.bar(winddir.get_group(iax+1), var.get_group(iax+1),
bins=bins, nsector=36, colors=colors,
linewidth=0.35,
normed=True)
# Set the tick labels font
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontname('Arial')
label.set_fontsize(12)
ax.set_title(month,fontsize=12,y=1.15)
if iax == 10:
leg = plt.legend(loc=8, ncol=2, bbox_to_anchor = (0.5,-0.65), frameon=False)
leg.set_title(catinfo['labels'][category])
fig.text(0.5, -0.085, r'$z={}$ m'.format(vertloc), ha='center', va='center')
axes = fig.get_children()[1:]
# adjust plot for specified max frequency
if ylim is None:
ylim = 0.0
for iax,month in enumerate(months):
ylim = np.max([ylim, axes[iax].get_ylim()[-1]])
for iax,month in enumerate(months):
axes[iax].set_ylim(0,ylim)
axes[iax].set_yticks(np.linspace(0.0,ylim,4))
# print(axes[iax].get_yticks())
axes[iax].set_yticklabels([str(np.round(x,decimals=1)) for x in axes[iax].get_yticks()])
fig.tight_layout()
return fig, axes, leg
def winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):
"""**Get Wind Direction Scatter Figure**.
Plot the wind direction scatter of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float): Describes the desired vertical location along the tower for analysis.
5. basecolor (string): Provides the color code information to get from "utils.py".
6. exclude_angles (tuple, list): Defines the start and stop angles to shade out regions according to International Electrotechnical Commission (IEC) standards.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
# set up data
dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
colors = utils.get_nrelcolors()
fig = plt.figure(figsize=(8,2.5))
ax = fig.add_subplot(111)
ax.scatter(metdat[dircol], metdat[varcol], marker='o',facecolor='w',color='k',lw=0.5,alpha=0.7)
ax.set_xlim([0,360])
for ii in range(len(exclude_angles)):
ax.axvspan(exclude_angles[ii][0], exclude_angles[ii][1], alpha=0.1, color=colors[basecolor][0])
ax.set_title(r'$z={}$ m'.format(vertloc))
ax.set_xlabel(r'Wind Direction [$^\circ$]')
ax.set_ylabel(catinfo['labels'][category])
return fig, ax#, leg
def stability_winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):
"""**Get Wind Direction Stability Scatter Figure**.
Plot the wind direction stability scatter of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'red']: Provides the color code information to get from "utils.py".
6. exclude_angles (tuple, list) [default: [(46, 228)]]: Defines the start and stop angles to shade out regions according to International Electrotechnical Commission (IEC) standards.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
stabconds = utils.get_stabconds()
colors = utils.get_colors(len(stabconds),basecolor='span')
nrelcolors = utils.get_nrelcolors()
# Set up data
dircol, _, _= utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
# dirind = utils.get_nearest_direction(metdat[category])
fig, ax = plt.subplots(len(stabconds),1, sharex=True, sharey=True, figsize=(6,8))
plotdat = metdat.groupby(stabcol)
for ind, stabcond in enumerate(stabconds):
ax.flatten()[ind].scatter(plotdat[dircol].get_group(stabcond),plotdat[varcol].get_group(stabcond),
marker='o',facecolor=colors[ind],color='k',lw=0.5,alpha=0.7)
ax.flatten()[ind].set_xlim([0,360])
# ax.flatten()[ind].set_ylim([0,120])
ax.flatten()[ind].legend([stabcond], fontsize=12, loc=1, frameon=False)
for ii in range(len(exclude_angles)):
ax.flatten()[ind].axvspan(exclude_angles[ii][0], exclude_angles[ii][1], alpha=0.1, color=nrelcolors[basecolor][0])
if ind == 0:
ax.flatten()[ind].set_title(r'$z={}$ m'.format(vertloc))
fig.tight_layout()
fig.text(0.5,0, r'Wind Direction [$^\circ$]', ha='center', va='center')
fig.text(0, 0.5, catinfo['labels'][category], ha='center', va='center', rotation='vertical')
return fig, ax #, leg
def groupby_scatter(metdat, catinfo, category, abscissa='direction', groupby='ti', nbins=5, vertloc=80, basecolor='span'):
"""**Get Wind Direction Grouped Scatter Figure**.
Plot the wind direction grouped scatter of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. abscissa (string) [default: 'direction']: Independent variable to plot against.
5. groupby (string) [default: 'ti']: Describes which categories to group by.
6. nbins (integer) [default: 5]: Divides the *groupby* variable into bins.
7. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
8. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
# set up data
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
groupcol, _, _= utils.get_vertical_locations(catinfo['columns'][groupby], location=vertloc)
abscol, _, _= utils.get_vertical_locations(catinfo['columns'][abscissa], location=vertloc)
temp = pd.cut(metdat[groupcol],5)
plotdat = metdat[[varcol,abscol,groupcol]].groupby(temp)
groups = list(plotdat.indices.keys())
colors = utils.get_colors(len(groups), basecolor=basecolor)
fig, ax = plt.subplots(figsize=(5,3), sharex=True, sharey=True)
for iax,group in enumerate(groups):
ax.scatter(plotdat[abscol].get_group(group), plotdat[varcol].get_group(group),facecolor=colors[iax],color='k',lw=0.5,alpha=0.7)
leg = ax.legend(groups, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
leg.set_title(catinfo['labels'][groupby])
# labels
ax.set_xlabel(catinfo['labels'][abscissa])
ax.set_ylabel(catinfo['labels'][category])
ax.set_title(r'$z={}$ m'.format(vertloc))
fig.tight_layout()
return fig, ax #, leg
def hist(metdat, catinfo, category, vertloc=80, basecolor='blue'):
"""**Get Histogram Figure**.
Plot the histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'blue']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
colors = utils.get_nrelcolors()
color = colors[basecolor][0]
# set up data
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
data = metdat[varcol].dropna(how='any')
fig, ax = plt.subplots(figsize=(5,3))
ax.hist(data,
bins = 35,
facecolor=color,
edgecolor='k',
weights=np.ones(len(data)) / len(data), density=False)
ax.set_title(r'$z={}$ m'.format(vertloc))
fig.text(0,0.5,'Frequency [%]',rotation='vertical', ha='center', va='center')
fig.text(0.5,0,catinfo['labels'][category], ha='center', va='center')
fig.tight_layout()
return fig, ax
def monthly_hist(metdat, catinfo, category, vertloc=80, basecolor='blue'):
"""**Get Monthly Histogram Figure**.
Plot the monthly histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'blue']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
colors = utils.get_nrelcolors()
color = colors[basecolor][0]
months = utils.monthnames()
# set up data
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
temp = metdat.groupby(metdat.index.month)
temp = temp[varcol]
binwidth = (metdat[varcol].dropna().max() - metdat[varcol].dropna().min())/35
bins = np.arange(metdat[varcol].dropna().min(),metdat[varcol].dropna().max(), binwidth)
fig, ax = plt.subplots(4,3, figsize=(9,9), sharex=True, sharey=True)
for im,month in enumerate(months):
data = temp.get_group(im+1).dropna()
ax.flatten()[im].hist(data,
bins=bins,
color=color,
edgecolor='k',
weights=np.ones(len(data))/len(data)*100)
ax.flatten()[im].set_title(month, fontsize=12)
fig.tight_layout()
fig.text(0,0.5,'Frequency [%]',rotation='vertical', ha='center', va='center')
fig.text(0.5,0,catinfo['labels'][category], ha='center', va='center')
return fig, ax
def hist_by_stability(metdat, catinfo, category, vertloc=80, basecolor='span'):
"""**Get Stability Grouped Histogram Figure**.
Plot the stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
5. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
stabconds = utils.get_stabconds()
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
colors = utils.get_colors(len(stabconds),basecolor=basecolor)
metdat = metdat.groupby(stabcol)
fig,ax = plt.subplots(len(stabconds),1, figsize=(4,6), sharex=True, sharey=True)
for ii,stab in enumerate(stabconds):
data = metdat[varcol].get_group(stab).dropna()
ax.flatten()[ii].hist(data,
facecolor=colors[ii],
edgecolor='k',
bins=50,
weights=np.ones(len(data)) / len(data),
density=False)
ax.flatten()[ii].legend([stab], fontsize=10, frameon=False)
ax.flatten()[0].set_title(r'$z={}$m'.format(vertloc))
fig.text(-0.03,0.5,'Frequency [%]',rotation='vertical', ha='center', va='center')
fig.text(0.5,0,catinfo['labels'][category], ha='center', va='center')
fig.tight_layout()
return fig, ax
def stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
"""**Get Stacked Stability Grouped Histogram Figure**.
Plot the stacked stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float): Describes the desired vertical location along the tower for analysis.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
stabconds = utils.get_stabconds()
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
colors = utils.get_colors(len(stabconds), basecolor='span')
plotdat = metdat.groupby(stabcol)
fig, ax = plt.subplots()
temp = pd.DataFrame({cond: plotdat[varcol].get_group(cond) for cond in stabconds})
temp.plot.hist(ax=ax,
stacked=True,
color=colors,
bins=35,
edgecolor='k',
legend=False,
# weights = np.ones(temp.shape) / len(temp.index),
density=True)
ax.set_xlabel(catinfo['labels'][category])
ax.set_title(r'$z={}$m'.format(vertloc))
fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
fig.tight_layout()
return fig, ax
def monthly_stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
"""**Get Monthly Stacked Stability Grouped Histogram Figure**.
Plot the monthly stacked stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
stabconds = utils.get_stabconds()
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
varcol, vertloc, _= utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
colors = utils.get_colors(len(stabconds), basecolor='span')
months = utils.monthnames()
plotdat = metdat.groupby([metdat.index.month, stabcol])
plotdat = plotdat[varcol]
fig, ax = plt.subplots(4,3, figsize=(9,10), sharex=True, sharey=True)
for iax, month in enumerate(months):
temp = pd.DataFrame({cond: plotdat.get_group((iax+1,cond)) for cond in stabconds})
temp.plot.hist(ax=ax.flatten()[iax],
stacked=True,
color=colors,
bins=35,
edgecolor='k',
legend=False,
# weights = np.ones(temp.dropna().shape) / np.prod(temp.shape),
density=True)
ax.flatten()[iax].set_title(month)
ax.flatten()[iax].set_ylabel('')
# fig.legend(stabconds, loc=8, bbox_to_anchor=(0, -0.1), edgecolor='w')
fig.text(0,0.58, 'Frequency', ha='center', va='center', fontsize=14, rotation='vertical')
leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.15), frameon=False)
fig.tight_layout()
fig.subplots_adjust(bottom=0.21)
fig.text(0.5, 0.16, catinfo['labels'][category], ha='center', va='center', fontsize=14)
return fig, ax#, leg
def normalized_hist_by_stability(metdat, catinfo, vertloc=80):
"""**Get Normalized Stability Grouped Histogram Figure**.
Plot the normalized stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
stabconds = utils.get_stabconds()
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
colors = utils.get_colors(len(stabconds), basecolor='span')
temp = metdat[stabcol].dropna()
garb = temp.groupby(temp.index.hour).value_counts(normalize=True)
garb.index.names = ['hour','stabclass']
garb = garb.reorder_levels(['stabclass','hour'])
hours = np.arange(24)
newbottom = np.zeros(24)
fig,ax = plt.subplots()
for jj,cond in enumerate(stabconds):
# Use this for missing data, also works for full data
a = garb.loc[cond]
b = a.index.tolist()
c = a.values.tolist()
for i in range(len(hours)):
if (hours[i]) in b:
pass
else:
b.insert(i,hours[i])
c.insert(i,0)
d = pd.Series(data = c, index = b)
ax.bar(hours, d, color=colors[jj], bottom=newbottom)
newbottom += c #<-- for if missing data, also works for full data
#ax.bar(hours, garb.loc[cond], color=colors[jj], bottom=newbottom)
#newbottom += garb.loc[cond]
ax.set_ylabel('Probability [%]')
ax.set_xlabel('Time of Day [Hour]')
fig.legend(stabconds)
#fig.legend(stabconds, loc=6, bbox_to_anchor=(1,0.5),framealpha=0)
fig.tight_layout()
return fig, ax
def normalized_monthly_hist_by_stability(metdat, catinfo, vertloc=80):
"""**Get Normalized Monthly Stability Grouped Histogram Figure**.
Plot the normalized monthly stability grouped histogram of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
months = utils.monthnames()
hours = np.arange(24)
stabcol, _, _= utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
stabconds = utils.get_stabconds()
colors = utils.get_colors(5,basecolor='span')
temp = metdat[stabcol].dropna()
plotdata = temp.groupby([temp.index.month.rename('month'), temp.index.hour.rename('hour')]).value_counts(normalize=True)
plotdata.index.names = ['month','hour','stabclass']
temp = plotdata.reorder_levels(['month','stabclass','hour'])
indexvals = [np.arange(1,13),stabconds, np.arange(24)]
indx = pd.MultiIndex.from_product(indexvals, names=['month','stabclass','hour'])
temp = temp.reindex(index=indx).fillna(0.0)
fig,ax = plt.subplots(4,3, figsize=(9,10), sharex=True, sharey=True)
for ii,month in enumerate(months):
newbottom = np.zeros(24)
for jj,cond in enumerate(stabconds):
pdat = temp.loc[ii+1,cond]
ax.flatten()[ii].bar(hours, pdat, color=colors[jj],bottom=newbottom)
newbottom += pdat
# fig.legend(stabconds, loc=8, bbox_to_anchor=(0, -0.1), edgecolor='w')
fig.text(-0.02,0.58, 'Probability [%]', ha='center', va='center', rotation='vertical')
leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.125), frameon=False)
fig.tight_layout()
fig.subplots_adjust(bottom=0.21)
fig.text(0.5, 0.165, 'Time of Day [Hour]', ha='center', va='center')
return fig, ax
###########################################
# End of Code
###########################################
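A minimal usage sketch of the plotting interface described in the docstrings above. It is not part of vis.py: the load_met_data helper, the file name, and the 'wind speed' category are assumptions standing in for whatever the companion met_funcs.py module actually provides.
# Hypothetical usage sketch (assumed helper and names, illustration only).
import met_funcs   # companion module referenced in the synopsis above
import vis

metdat, catinfo = met_funcs.load_met_data('metmast_10min.csv')    # assumed helper
fig, ax = vis.hourlyplot(metdat, catinfo, category='wind speed')  # assumed category name
fig.savefig('hourly_wind_speed.png', dpi=200, bbox_inches='tight')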
| 46.293515
| 190
| 0.665266
|
660a049f3ec33b87d1f4eb5ea8d298baaaafbc2b
| 1,092
|
py
|
Python
|
name_gen.py
|
brahmcapoor/naming-changes-complexity
|
604369ebcb2649b2a7994a1405b944dd6cb37f18
|
[
"MIT"
] | null | null | null |
name_gen.py
|
brahmcapoor/naming-changes-complexity
|
604369ebcb2649b2a7994a1405b944dd6cb37f18
|
[
"MIT"
] | null | null | null |
name_gen.py
|
brahmcapoor/naming-changes-complexity
|
604369ebcb2649b2a7994a1405b944dd6cb37f18
|
[
"MIT"
] | null | null | null |
from random import sample, shuffle, randint, choice
"""
Generates 8 random pairs of names, one simple and one complex.
It's a pretty simple script, so it's not commented.
"""
VOWELS = ['a', 'e', 'i', 'o', 'u']
CONSONANTS = ['s', 'z', 'f', 'v', 'k', 'g', 'p', 'b', 't', 'd']
def generate_random_word(complex=False):
"""
Generates one random word
"""
if complex:
letters = 3
else:
letters = 1
vowels = sample(VOWELS, letters)
consonants = sample(CONSONANTS, letters)
shuffle(vowels)
shuffle(consonants)
return "".join(consonants[i] + vowels[i] for i in range(letters))
def generate_pair_name():
"""
Generates a string of names for a pair
"""
simple = generate_random_word()
while True:
hard = generate_random_word(True)
if simple not in hard:
break
return simple + " " + hard
def main():
names = [generate_pair_name() for i in range(8)]
with open('names.txt', 'wb') as f:
f.writelines(name + '\n' for name in names)
if __name__ == '__main__':
main()
| 20.603774
| 69
| 0.595238
|
345e8084c4e8f87fac27409e079d337af81cfe36
| 3,858
|
py
|
Python
|
src/p1.py
|
BLTowsen/NeuralNetworkFromScratch
|
fe222a2142f66c9bab5a4ff8539e9c4b46d4e597
|
[
"MIT"
] | 1
|
2020-07-22T12:33:39.000Z
|
2020-07-22T12:33:39.000Z
|
src/p1.py
|
BLTowsen/NeuralNetworkFromScratch
|
fe222a2142f66c9bab5a4ff8539e9c4b46d4e597
|
[
"MIT"
] | null | null | null |
src/p1.py
|
BLTowsen/NeuralNetworkFromScratch
|
fe222a2142f66c9bab5a4ff8539e9c4b46d4e597
|
[
"MIT"
] | null | null | null |
import numpy as np
import nnfs
from nnfs.datasets import spiral_data
nnfs.init() # setting random seed and default data type for numpy to use
# X = [[1, 2, 3, 2.5],
# [2.0, 5.0, -1.0, 2.0],
# [-1.5, 2.7, 3.3, -0.8]]
# X = [[1], [2]]
##################################
# Long version of softmax function
##################################
# layer_outputs = [4.8, 1.21, 2.385]
#
# E = 2.71828182846
#
# exp_values = []
# for output in layer_outputs:
# exp_values.append(E ** output)
# print('exponentiated values:')
# print(exp_values)
#
# norm_base = sum(exp_values) #sum all values
# norm_values = []
# for value in exp_values:
# norm_values.append(value/norm_base)
# print('normalized exponentiated values:')
# print(norm_values)
# print('sum of normalized values: ', sum(norm_values))
###################################
# Sleek softmax function
###################################
# layer_outputs = [4.8, 1.21, 2.385]
#
# # For each value in a vector, calculate the exponential value
# exp_values = np.exp(layer_outputs)
# print('exponentiated values:')
# print(exp_values)
#
# # Now normalize values
# norm_values = exp_values / np.sum(exp_values)
# print('normalized exponentiated values:')
# print(norm_values)
#
# print('sum of normalized values: ', np.sum(norm_values))
##########################################
"""
np.sum(array_of_arrays, axis=?, keepdims=?) where ? = 0, 1 or None
axis=0: sums down the columns and outputs an array
axis=1: sums across the rows and outputs an array
axis=None: sums everything and outputs a single number
keepdims = True or False - whether to keep the original dimensions of the array
"""
# layer_outputs = [[4.8, 1.21, 2.385],
# [8.9, -1.21, 0.2],
# [1.41, 1.051, 0.026]]
# print('so we can sum axis 1, but note the current shape:')
# print(np.sum(layer_outputs, axis=1, keepdims=True))
# X, y = spiral_data(100, 3)
class Layer_Dense:
def __init__(self, n_inputs, n_neurons):
self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1, n_neurons))
def forward(self, inputs):
self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
def forward(self, inputs):
self.output = np.maximum(0, inputs)
# Softmax activation
class Activation_Softmax:
# Forward pass
def forward(self, inputs):
# Get unnormalized probabilities
exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))  # subtract each row's max so values are <= 0 before exponentiating; exp then stays in (0, 1] and cannot overflow
# Normalize them for each sample
probabilities = exp_values / np.sum(exp_values, axis=1, keepdims=True)
self.output = probabilities
# Create dataset
X, y = spiral_data(100, 3)
# Create Dense layer with 2 input features and 3 output values
dense1 = Layer_Dense(2, 3) # first dense layer, 2 inputs (each sample has 2 features), 3 outputs
# Create ReLU activation (to be used with Dense layer):
activation1 = Activation_ReLU()
# Create second Dense layer with 3 input features (as we take output of previous layer here) and 3 output values
dense2 = Layer_Dense(3, 3) # second dense layer, 3 inputs, 3 outputs
# Create Softmax activation (to be used with Dense layer):
activation2 = Activation_Softmax()
# Make a forward pass of our training data through this layer
dense1.forward(X)
# Make a forward pass through activation function
# it takes the output of first dense layer here
activation1.forward(dense1.output)
# Make a forward pass through second dense layer
# it takes output of activation function of first layer as inputs
dense2.forward(activation1.output)
# Make a forward pass through activation function
# it takes the output of the second dense layer here
activation2.forward(dense2.output)
# Let's see output of the first few samples:
print(activation2.output[:5])
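A small standalone check, not part of the original script, that makes the axis/keepdims reasoning in the comments above concrete: subtracting each row's maximum keeps the exponentials from overflowing, and every row of the resulting probabilities sums to 1.
# Standalone sanity check for the batched softmax above (illustration only).
import numpy as np

batch = np.array([[4.8, 1.21, 2.385],
                  [8.9, -1.81, 0.2]])

shifted = batch - np.max(batch, axis=1, keepdims=True)   # row maxima become 0
probs = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)

print(probs)                  # rows of probabilities
print(np.sum(probs, axis=1))  # [1. 1.]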
| 29.007519
| 158
| 0.665111
|
39ddaa5914616bd9ad686fffeb7eef291ee7eb64
| 4,524
|
py
|
Python
|
ekorpkit/models/sentiment/analyser.py
|
entelecheia/ekorpkit
|
400cb15005fdbcaa2ab0c311e338799283f28fe0
|
[
"CC-BY-4.0"
] | 4
|
2022-02-26T10:54:16.000Z
|
2022-02-26T11:01:56.000Z
|
ekorpkit/models/sentiment/analyser.py
|
entelecheia/ekorpkit
|
400cb15005fdbcaa2ab0c311e338799283f28fe0
|
[
"CC-BY-4.0"
] | 1
|
2022-03-25T06:37:12.000Z
|
2022-03-25T06:45:53.000Z
|
ekorpkit/models/sentiment/analyser.py
|
entelecheia/ekorpkit
|
400cb15005fdbcaa2ab0c311e338799283f28fe0
|
[
"CC-BY-4.0"
] | null | null | null |
import logging
import pandas as pd
from .base import BaseSentimentAnalyser
log = logging.getLogger(__name__)
class HIV4SA(BaseSentimentAnalyser):
"""
A class for sentiment analysis using the HIV4 lexicon.
"""
def __init__(self, preprocessor=None, lexicon=None, **kwargs):
super().__init__(preprocessor=preprocessor, lexicon=lexicon, **kwargs)
def _get_score(self, tokens, lexicon_features, feature="polarity"):
"""Get score for features.
:returns: dict
"""
lxfeat_names = self._features.get(feature).get("lexicon_features")
lxfeat = pd.DataFrame.from_dict(lexicon_features, orient="index")
score = {}
if feature == "polarity":
lxfeat["pos"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Positiv"] else 0, axis=1
)
lxfeat["neg"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Negativ"] else 0, axis=1
)
lxfeat_agg = lxfeat.agg({"pos": "sum", "neg": "sum"})
polarity = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
lxfeat_agg["pos"] + lxfeat_agg["neg"] + self.EPSILON
)
subjectivity = (lxfeat_agg["pos"] + lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
score["polarity"] = polarity
score["subjectivity"] = subjectivity
elif isinstance(lxfeat_names, str):
lxfeat[feature] = lxfeat.apply(
lambda x: 1 * x["count"] if x[lxfeat_names] else 0, axis=1
)
lxfeat_agg = lxfeat.agg({feature: "sum"})
feat_score = lxfeat_agg[feature] / (len(tokens) + self.EPSILON)
score[feature] = feat_score
return score
def _assign_class(self, score, feature="polarity"):
"""Assign class to a score.
:returns: dict
"""
labels = self._features.get(feature).get("labels")
if labels:
score["label"] = ""
for label, thresh in labels.items():
if isinstance(thresh, str):
thresh = eval(thresh)
if score[feature] >= thresh[0] and score[feature] <= thresh[1]:
score["label"] = label
return score
class LMSA(BaseSentimentAnalyser):
"""
A class for sentiment analysis using the LM lexicon.
"""
def __init__(self, preprocessor=None, lexicon=None, **kwargs):
super().__init__(preprocessor=preprocessor, lexicon=lexicon, **kwargs)
def _get_score(self, tokens, lexicon_features, feature="polarity"):
"""Get score for features.
:returns: dict
"""
lxfeat_names = self._features.get(feature).get("lexicon_features")
lxfeat = pd.DataFrame.from_dict(lexicon_features, orient="index")
score = {}
if feature == "polarity":
lxfeat["pos"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Positive"] > 0 else 0, axis=1
)
lxfeat["neg"] = lxfeat.apply(
lambda x: 1 * x["count"] if x["Negative"] > 0 else 0, axis=1
)
lxfeat_agg = lxfeat.agg({"pos": "sum", "neg": "sum"})
polarity = (lxfeat_agg["pos"] - lxfeat_agg["neg"]) / (
lxfeat_agg["pos"] + lxfeat_agg["neg"] + self.EPSILON
)
subjectivity = (lxfeat_agg["pos"] + lxfeat_agg["neg"]) / (
len(tokens) + self.EPSILON
)
score[feature] = polarity
score["subjectivity"] = subjectivity
elif isinstance(lxfeat_names, str):
lxfeat[feature] = lxfeat.apply(
lambda x: 1 * x["count"] if x[lxfeat_names] > 0 else 0, axis=1
)
lxfeat_agg = lxfeat.agg({feature: "sum"})
feat_score = lxfeat_agg[feature] / (len(tokens) + self.EPSILON)
score[feature] = feat_score
return score
def _assign_class(self, score, feature="polarity"):
"""Assign class to a score.
:returns: dict
"""
label_key = feature + "_label"
labels = self._features.get(feature).get("labels")
if labels:
score[label_key] = ""
for label, thresh in labels.items():
if isinstance(thresh, str):
thresh = eval(thresh)
if score[feature] >= thresh[0] and score[feature] <= thresh[1]:
score[label_key] = label
return score
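Both classes compute polarity as (pos - neg) / (pos + neg + EPSILON) and subjectivity as (pos + neg) / (number of tokens + EPSILON), then map the score onto a label through the thresholds configured in self._features. The counts and thresholds below are invented purely to show how that mapping behaves.
# Toy walk-through of the scoring and labelling logic above (invented numbers).
EPSILON = 1e-8
pos, neg, n_tokens = 7, 3, 50

polarity = (pos - neg) / (pos + neg + EPSILON)        # about 0.4
subjectivity = (pos + neg) / (n_tokens + EPSILON)     # about 0.2

labels = {"negative": (-1.0, -0.1), "neutral": (-0.1, 0.1), "positive": (0.1, 1.0)}
label = next(name for name, (lo, hi) in labels.items() if lo <= polarity <= hi)
print(polarity, subjectivity, label)                  # -> positive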
| 36.780488
| 79
| 0.54443
|
333949faf73bd0077bd9909ad09820ce35ff7a7b
| 2,780
|
py
|
Python
|
rec/mrnas.py
|
OmnesRes/OncoRank
|
38e76cedff9846a0e76aeee5da07258ed2ec3a49
|
[
"MIT"
] | 1
|
2020-03-03T05:11:00.000Z
|
2020-03-03T05:11:00.000Z
|
rec/mrnas.py
|
OmnesRes/OncoRank
|
38e76cedff9846a0e76aeee5da07258ed2ec3a49
|
[
"MIT"
] | null | null | null |
rec/mrnas.py
|
OmnesRes/OncoRank
|
38e76cedff9846a0e76aeee5da07258ed2ec3a49
|
[
"MIT"
] | 1
|
2020-03-04T10:12:42.000Z
|
2020-03-04T10:12:42.000Z
|
import scipy.stats as stats
import numpy as np
import math
from rpy2 import robjects as ro
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
cancers=['BLCA','BRCA','CESC','COAD','ESCA','GBM','HNSC','KIRC','KIRP','LAML','LGG','LIHC','LUAD','LUSC','OV',\
'PAAD','READ','SARC','SKCM','STAD','UCEC']
genes={}
for cancer in cancers:
##path to https://github.com/OmnesRes/onco_lnc repository
f=open(os.path.join(BASE_DIR,'onco_lnc','mrna','cox',cancer,'coeffs_pvalues.txt'))
data=sorted([[j.split('\t')[0],float(j.split('\t')[-2])] for j in f],key=lambda x:x[-1])
for index,i in enumerate(data):
genes[i[0]]=genes.get(i[0],[])+[(index+1)/float(len(data))-.5*1/float(len(data))]
chi_stats={}
for i in genes:
stat=-2*sum([math.log(k) for k in genes[i]])
chi_stats[i]=[len(genes[i]),stats.chi2.sf(stat,len(genes[i])*2)]
genes_reversed={}
for cancer in cancers:
##path to https://github.com/OmnesRes/onco_lnc repository
f=open(os.path.join(BASE_DIR,'onco_lnc','mrna','cox',cancer,'coeffs_pvalues.txt'))
data=sorted([[j.split('\t')[0],float(j.split('\t')[-2])] for j in f],key=lambda x:x[-1])
for index,i in enumerate(data):
genes_reversed[i[0]]=genes_reversed.get(i[0],[])+[(len(data)-index)/float(len(data))-.5*1/float(len(data))]
chi_stats_reversed={}
for i in genes_reversed:
stat=-2*sum([math.log(k) for k in genes_reversed[i]])
chi_stats_reversed[i]=[len(genes_reversed[i]),stats.chi2.sf(stat,len(genes_reversed[i])*2)]
print len(chi_stats)
print len(chi_stats_reversed)
merged=[]
for i in chi_stats:
if chi_stats[i][0]>=8:
if chi_stats[i][1]<chi_stats_reversed[i][1]:
merged.append([i,math.log(2*chi_stats[i][1],10),2*chi_stats[i][1]])
elif chi_stats[i][1]>chi_stats_reversed[i][1]:
merged.append([i,-1*math.log(2*chi_stats_reversed[i][1],10),2*chi_stats_reversed[i][1]])
else:
merged.append([i,0,2*chi_stats_reversed[i][1]])
print i,chi_stats_reversed[i][1]
print len(merged)
f=open(os.path.join(BASE_DIR,'onco_rank','oncolnc_mrna_names.txt'))
data=[eval(i.strip()) for i in f]
name_dict={}
for i in data:
name_dict[i[0]]=i[1:]
print len(name_dict)
merged.sort(key=lambda x:x[-1])
##BH correction
pvalues=[i[-1] for i in merged]
#prepare data for R
ro.globalenv['pvalues']=ro.FloatVector(pvalues)
#perform BH adjustment
res=ro.r('p.adjust(pvalues,"BH")')
#extract data
adjusted=list(res)
f=open('mrna_ranks.txt','w')
for i,fdr in zip(merged,adjusted):
f.write(i[0])
f.write('\t')
for j in name_dict[i[0]]:
f.write(j)
f.write('\t')
f.write(str(i[1]))
f.write('\t')
f.write(str(fdr))
f.write('\n')
f.close()
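The script combines the per-cancer rank p-values with Fisher's method (the -2*sum(log p) statistic compared against a chi-square with 2k degrees of freedom) and then applies a Benjamini-Hochberg adjustment through rpy2. For readers without an R installation, the sketch below shows the same two steps with SciPy and statsmodels; the p-values are made up and it is an illustration, not a drop-in replacement for the script.
# SciPy/statsmodels sketch of the Fisher + BH steps above (made-up p-values).
import numpy as np
from scipy import stats
from statsmodels.stats.multitest import multipletests

per_cancer_p = [0.02, 0.30, 0.004, 0.11]                 # one value per cancer

# Fisher's method: -2 * sum(log p) follows chi2 with 2 * k degrees of freedom.
chi_stat = -2 * np.sum(np.log(per_cancer_p))
combined_p = stats.chi2.sf(chi_stat, 2 * len(per_cancer_p))
# Equivalent one-liner: stats.combine_pvalues(per_cancer_p, method='fisher')

# Benjamini-Hochberg correction across a collection of combined p-values.
all_p = [combined_p, 0.2, 0.01, 0.8]                     # made-up values
reject, fdr, _, _ = multipletests(all_p, method='fdr_bh')
print(combined_p, fdr)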
| 29.892473
| 115
| 0.641007
|
c29c5d8f5096365929c2719bf1a0c0b792a500d5
| 4,748
|
py
|
Python
|
observations/models.py
|
UiL-OTS-labs/ethics
|
7782e08a4f772b375ba56b4b865fa9efcd730ca1
|
[
"MIT"
] | 2
|
2017-04-22T11:07:13.000Z
|
2018-03-02T12:23:24.000Z
|
observations/models.py
|
UiL-OTS-labs/ethics
|
7782e08a4f772b375ba56b4b865fa9efcd730ca1
|
[
"MIT"
] | 124
|
2020-04-30T07:06:58.000Z
|
2022-03-28T12:50:16.000Z
|
observations/models.py
|
UiL-OTS-labs/etcl
|
a22df7ff78620b704a500354fb218fbe9bcabf5f
|
[
"MIT"
] | 1
|
2021-08-04T11:44:21.000Z
|
2021-08-04T11:44:21.000Z
|
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from main.models import SettingModel
from main.validators import validate_pdf_or_doc
from studies.models import Study
class Registration(models.Model):
order = models.PositiveIntegerField(unique=True)
description = models.CharField(max_length=200)
needs_details = models.BooleanField(default=False)
requires_review = models.BooleanField(default=False)
class Meta:
ordering = ['order']
def __str__(self):
return self.description
class Observation(SettingModel):
# This is used internally to provide backwards compatibility with the old
# version of this model. All old fields are still used if this is 1.
version = models.PositiveIntegerField(
'INTERNAL - Describes which version of the observation model is used',
default=2)
details_who = models.TextField(
_('Beschrijf <b>wie</b> er wordt geobserveerd.'),
help_text=_(
'Maak duidelijk voor de commissie wie er wordt geobserveerd en wat er precies van de deelnemer wordt'
' geobserveerd. Bijvoorbeeld: De leraar zal geobserveerd worden. De observatie moet de interactie '
'tussen leraar en leerling in kaart brengen.'),
blank=True,
)
details_why = models.TextField(
_('Beschrijf <b>waarom</b> er wordt geobserveerd.'),
help_text=_(
'Wat is het doel van de observatie? Bijvoorbeeld: Het doel van de '
'observatie is inzicht te krijgen in hoe de leerkracht omgaat met '
'de uitleg van de nieuwe lesmethode. Doet die dat op de gewenste '
'manier en in begrijpelijke taal?'),
blank=True,
)
details_frequency = models.TextField(
_(
'Beschrijf <b>hoe vaak en hoe lang</b> de observant wordt geobserveerd.'),
help_text=_('Bijvoorbeeld: De leraar zal 5 lessen van 45 minuten '
'worden geobserveerd.'),
blank=True,
)
is_anonymous = models.BooleanField(
_('Wordt er anoniem geobserveerd?'),
help_text=_(
'Zoals zou kunnen voorkomen op fora en de onderzoeker ook een account heeft.'),
default=False,
)
is_anonymous_details = models.TextField(
_('Licht toe'),
blank=True,
)
is_in_target_group = models.BooleanField(
_('Doet de onderzoeker zich voor als behorende tot de doelgroep?'),
default=False,
)
is_in_target_group_details = models.TextField(
_('Licht toe'),
blank=True,
)
is_nonpublic_space = models.BooleanField(
_('Wordt er geobserveerd in een niet-openbare ruimte?'),
help_text=_('Bijvoorbeeld er wordt geobserveerd bij iemand thuis, \
tijdens een hypotheekgesprek, tijdens politieverhoren of een forum waar \
een account voor moet worden aangemaakt.'),
default=False,
)
is_nonpublic_space_details = models.TextField(
_('Licht toe'),
blank=True,
)
has_advanced_consent = models.BooleanField(
_('Vindt informed consent van tevoren plaats?'),
default=True,
)
has_advanced_consent_details = models.TextField(
_(
'Leg uit waarom informed consent niet van te voren plaatsvindt en '
'geef ook op welke wijze dit achteraf verzorgd wordt.'
),
blank=True,
)
needs_approval = models.BooleanField(
_('Heb je toestemming nodig van een (samenwerkende) instantie \
om deze observatie te mogen uitvoeren?'),
default=False,
)
approval_institution = models.CharField(
_('Welke instantie?'),
max_length=200,
blank=True,
)
approval_document = models.FileField(
_('Upload hier het toestemmingsdocument (in .pdf of .doc(x)-formaat)'),
blank=True,
validators=[validate_pdf_or_doc],
)
registrations = models.ManyToManyField(
Registration,
verbose_name=_('Hoe wordt het gedrag geregistreerd?'))
registrations_details = models.CharField(
_('Namelijk'),
max_length=200,
blank=True)
# Legacy, only used in v1
days = models.PositiveIntegerField(
_('Op hoeveel dagen wordt er geobserveerd (per deelnemer)?'),
blank=True,
null=True)
mean_hours = models.DecimalField(
_('Hoeveel uur wordt er gemiddeld per dag geobserveerd?'),
max_digits=4,
decimal_places=2,
validators=[MaxValueValidator(24)],
blank=True,
null=True)
# References
study = models.OneToOneField(
Study,
on_delete=models.CASCADE)
| 32.081081
| 113
| 0.65417
|
1c30bad2d352bf9d602223667dd3a7db097949ed
| 4,347
|
py
|
Python
|
src/sage/server/notebook/colorize.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | 2
|
2021-08-20T00:30:35.000Z
|
2021-11-17T10:54:00.000Z
|
src/sage/server/notebook/colorize.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | null | null | null |
src/sage/server/notebook/colorize.py
|
bopopescu/sage-5
|
9d85b34956ca2edd55af307f99c5d3859acd30bf
|
[
"BSL-1.0"
] | null | null | null |
# This file is part of the OLD Sage notebook and is NOT actively developed,
# maintained, or supported. As of Sage v4.1.2, all notebook development has
# moved to the separate Sage Notebook project:
#
# http://nb.sagemath.org/
#
# The new notebook is installed in Sage as an spkg (e.g., sagenb-0.3.spkg).
#
# Please visit the project's home page for more information, including directions on
# obtaining the latest source code. For notebook-related development and support,
# please consult the sage-notebook discussion group:
#
# http://groups.google.com/group/sage-notebook
"""nodoctest
"""
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (C) 2007 William Stein <wstein@gmail.com>
# Distributed under the terms of the GNU General Public License (GPL)
# The full text of the GPL is available at:
# http://www.gnu.org/licenses/
#############################################################################
"""
Colorize - Python source formatter that outputs Python code in XHTML.
This script is based on MoinMoin - The Python Source Parser.
FROM: Modified version of
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442482
"""
# Imports
import cgi
import string
import sys
import cStringIO
import keyword
import token
import tokenize
#Set up basic values.
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
_classes = {
token.NUMBER: 'token_number',
token.OP: 'token_op',
token.STRING: 'token_string',
tokenize.COMMENT: 'token_comment',
token.NAME: 'token_name',
token.ERRORTOKEN: 'token_error',
_KEYWORD: 'keyword',
_TEXT: 'text',
}
class Parser:
""" Send colored python source.
"""
def __init__(self, raw, out = sys.stdout):
""" Store the source text.
"""
self.raw = string.strip(string.expandtabs(raw))
self.out = out
def format(self, formatter, form):
""" Parse and send the colored source.
"""
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
while 1:
pos = string.find(self.raw, '\n', pos) + 1
if not pos: break
self.lines.append(pos)
self.lines.append(len(self.raw))
# parse the source and write it
self.pos = 0
text = cStringIO.StringIO(self.raw)
try:
tokenize.tokenize(text.readline, self)
except tokenize.TokenError, ex:
msg = ex[0]
line = ex[1][0]
self.out.write("<h3>ERROR: %s</h3>%s\n" % (
msg, self.raw[self.lines[line]:]))
def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
""" Token handler.
"""
if 0:
print "type", toktype, token.tok_name[toktype], "text", toktext,
print "start", srow, scol, "end", erow, ecol, "<br />"
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# handle newlines
if toktype in [token.NEWLINE, tokenize.NL]:
self.out.write('\n')
return
# send the original whitespace, if needed
if newpos > oldpos:
self.out.write(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return
# map token type to a color/class group
if token.LPAR <= toktype and toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
classval = _classes.get(toktype, _classes[_TEXT])
style = ''
if toktype == token.ERRORTOKEN:
style = ' style="border: solid 1.5pt #FF0000;"'
# send text
self.out.write('<span class="%s"%s>' % (classval, style))
self.out.write(cgi.escape(toktext))
self.out.write('</span>')
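# Illustrative use of the helper below (Python 2, like the rest of this
# legacy module; the source string is a made-up example):
#   colorize("print 'hi'") returns the source wrapped in <span class="...">
#   elements whose class names come from the _classes mapping above.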
def colorize(source):
import os, sys
# write colorized version to "[filename].py.html"
html = cStringIO.StringIO()
Parser(source, html).format(None, None)
html.flush()
html.seek(0)
return html.read()
| 30.1875
| 84
| 0.576259
|
81ea9d65797320e42006a33cdc8c858186bee79a
| 198
|
py
|
Python
|
ex015.py
|
AdaltonVitor/python-curso-em-video
|
b7c0b5f72e22c0ea045ca0d47bb577e9832e76f3
|
[
"MIT"
] | 1
|
2022-01-02T21:59:04.000Z
|
2022-01-02T21:59:04.000Z
|
ex015.py
|
AdaltonVitor/python-curso-em-video
|
b7c0b5f72e22c0ea045ca0d47bb577e9832e76f3
|
[
"MIT"
] | null | null | null |
ex015.py
|
AdaltonVitor/python-curso-em-video
|
b7c0b5f72e22c0ea045ca0d47bb577e9832e76f3
|
[
"MIT"
] | null | null | null |
salario = float(input('Qual o salario do funcionario? R$'))
novo = salario + (salario * 15 / 100)
print('O salario do funcionario era R${} e com o ajuste de 15% foi para R${} '.format(salario,novo))
| 66
| 100
| 0.686869
|
3267ce7e719235cd196603a526872cbd9cdcfaa0
| 12,587
|
py
|
Python
|
selfdrive/car/gm/values.py
|
tunabelly/openpilot
|
cbd4731145ebba06f6fab822c5e23e146eaa562f
|
[
"MIT"
] | 1
|
2019-05-24T22:04:41.000Z
|
2019-05-24T22:04:41.000Z
|
selfdrive/car/gm/values.py
|
tunabelly/openpilot
|
cbd4731145ebba06f6fab822c5e23e146eaa562f
|
[
"MIT"
] | null | null | null |
selfdrive/car/gm/values.py
|
tunabelly/openpilot
|
cbd4731145ebba06f6fab822c5e23e146eaa562f
|
[
"MIT"
] | null | null | null |
from cereal import car
from selfdrive.car import dbc_dict
class CAR:
HOLDEN_ASTRA = "HOLDEN ASTRA RS-V BK 2017"
VOLT = "CHEVROLET VOLT PREMIER 2017"
CADILLAC_ATS = "CADILLAC ATS Premium Performance 2018"
CADILLAC_CT6 = "CADILLAC CT6 SUPERCRUISE 2018"
MALIBU = "CHEVROLET MALIBU PREMIER 2017"
ACADIA = "GMC ACADIA DENALI 2018"
BUICK_REGAL = "BUICK REGAL ESSENCE 2018"
YUKON = "GMC YUKON DENALI 2017" # MSA, YUKON
SUPERCRUISE_CARS = [CAR.CADILLAC_CT6]
class CruiseButtons:
UNPRESS = 1
RES_ACCEL = 2
DECEL_SET = 3
MAIN = 5
CANCEL = 6
class AccState:
OFF = 0
ACTIVE = 1
FAULTED = 3
STANDSTILL = 4
def is_eps_status_ok(eps_status, car_fingerprint):
valid_eps_status = []
if car_fingerprint in SUPERCRUISE_CARS:
valid_eps_status += [0, 1, 4, 5, 6]
else:
valid_eps_status += [0, 1]
return eps_status in valid_eps_status
def parse_gear_shifter(can_gear):
if can_gear == 0:
return car.CarState.GearShifter.park
elif can_gear == 1:
return car.CarState.GearShifter.neutral
elif can_gear == 2:
return car.CarState.GearShifter.drive
elif can_gear == 3:
return car.CarState.GearShifter.reverse
else:
return car.CarState.GearShifter.unknown
FINGERPRINTS = {
# Astra BK MY17, ASCM unplugged
CAR.HOLDEN_ASTRA: [{
190: 8, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 8, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 8, 398: 8, 401: 8, 413: 8, 417: 8, 419: 8, 422: 1, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 8, 455: 7, 456: 8, 458: 5, 479: 8, 481: 7, 485: 8, 489: 8, 497: 8, 499: 3, 500: 8, 501: 8, 508: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 647: 5, 707: 8, 715: 8, 723: 8, 753: 5, 761: 7, 806: 1, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1009: 8, 1011: 6, 1017: 8, 1019: 3, 1020: 8, 1105: 6, 1217: 8, 1221: 5, 1225: 8, 1233: 8, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 8, 1280: 4, 1300: 8, 1328: 4, 1417: 8, 1906: 7, 1907: 7, 1908: 7, 1912: 7, 1919: 7,
}],
CAR.VOLT: [
# Volt Premier w/ ACC 2017
{
170: 8, 171: 8, 189: 7, 190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 288: 5, 289: 8, 298: 8, 304: 1, 308: 4, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 389: 2, 390: 7, 417: 7, 419: 1, 426: 7, 451: 8, 452: 8, 453: 6, 454: 8, 456: 8, 479: 3, 481: 7, 485: 8, 489: 8, 493: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 528: 4, 532: 6, 546: 7, 550: 8, 554: 3, 558: 8, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 566: 5, 567: 3, 568: 1, 573: 1, 577: 8, 647: 3, 707: 8, 711: 6, 715: 8, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 961: 8, 969: 8, 977: 8, 979: 7, 988: 6, 989: 8, 995: 7, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1105: 6, 1187: 4, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1227: 4, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1273: 3, 1275: 3, 1280: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1905: 7, 1906: 7, 1907: 7, 1910: 7, 1912: 7, 1922: 7, 1927: 7, 1928: 7, 2016: 8, 2020: 8, 2024: 8, 2028: 8
},
# Volt Premier w/ ACC 2018
{
170: 8, 171: 8, 189: 7, 190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 288: 5, 298: 8, 304: 1, 308: 4, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 389: 2, 390: 7, 417: 7, 419: 1, 426: 7, 451: 8, 452: 8, 453: 6, 454: 8, 456: 8, 479: 3, 481: 7, 485: 8, 489: 8, 493: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 528: 4, 532: 6, 546: 7, 550: 8, 554: 3, 558: 8, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 566: 5, 567: 3, 568: 1, 573: 1, 577: 8, 578: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 3, 707: 8, 711: 6, 715: 8, 717: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 967: 4, 969: 8, 977: 8, 979: 7, 988: 6, 989: 8, 995: 7, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1187: 4, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1227: 4, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1273: 3, 1275: 3, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1516: 8, 1601: 8, 1618: 8, 1905: 7, 1906: 7, 1907: 7, 1910: 7, 1912: 7, 1922: 7, 1927: 7, 1930: 7, 2016: 8, 2018: 8, 2020: 8, 2024: 8, 2028: 8
}],
CAR.BUICK_REGAL : [
# Regal TourX Essence w/ ACC 2018
{
190: 8, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 8, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 8, 419: 8, 422: 4, 426: 8, 431: 8, 442: 8, 451: 8, 452: 8, 453: 8, 455: 7, 456: 8, 463: 3, 479: 8, 481: 7, 485: 8, 487: 8, 489: 8, 495: 8, 497: 8, 499: 3, 500: 8, 501: 8, 508: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 569: 3, 573: 1, 577: 8, 578: 8, 579: 8, 587: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 3, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 882: 8, 884: 8, 890: 1, 892: 2, 893: 2, 894: 1, 961: 8, 967: 8, 969: 8, 977: 8, 979: 8, 985: 8, 1001: 8, 1005: 6, 1009: 8, 1011: 8, 1013: 3, 1017: 8, 1020: 8, 1024: 8, 1025: 8, 1026: 8, 1027: 8, 1028: 8, 1029: 8, 1030: 8, 1031: 8, 1032: 2, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 8, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1259: 8, 1261: 8, 1263: 8, 1265: 8, 1267: 8, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1601: 8, 1602: 8, 1603: 7, 1611: 8, 1618: 8, 1906: 8, 1907: 7, 1912: 7, 1914: 7, 1916: 7, 1919: 7, 1930: 7, 2016: 8, 2018: 8, 2019: 8, 2024: 8, 2026: 8
}],
CAR.CADILLAC_ATS: [
# Cadillac ATS Coupe Premium Performance 3.6L RWD w/ ACC 2018
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 322: 7, 328: 1, 352: 5, 368: 3, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 401: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 462: 4, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 491: 2, 493: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 528: 5, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 719: 5, 723: 2, 753: 5, 761: 7, 801: 8, 804: 3, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 882: 8, 890: 1, 892: 2, 893: 2, 894: 1, 961: 8, 967: 4, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1011: 6, 1013: 3, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1233: 8, 1241: 3, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1271: 8, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1904: 7, 1906: 7, 1907: 7, 1912: 7, 1916: 7, 1917: 7, 1918: 7, 1919: 7, 1920: 7, 1930: 7, 2016: 8, 2024: 8
}],
CAR.CADILLAC_CT6: [{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 313: 8, 320: 3, 322: 7, 328: 1, 336: 1, 338: 6, 340: 6, 352: 5, 354: 5, 356: 8, 368: 3, 372: 5, 381: 8, 386: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 458: 5, 460: 5, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 528: 5, 532: 6, 534: 2, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 569: 3, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 719: 5, 723: 2, 753: 5, 761: 7, 800: 6, 801: 8, 804: 3, 810: 8, 832: 8, 833: 8, 834: 8, 835: 6, 836: 5, 837: 8, 838: 8, 839: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 884: 8, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1011: 6, 1013: 1, 1017: 8, 1019: 2, 1020: 8, 1105: 6, 1217: 8, 1221: 5, 1223: 3, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1259: 8, 1261: 7, 1263: 4, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1918: 7, 1919: 7, 1934: 7, 2016: 8, 2024: 8
}],
CAR.MALIBU: [
# Malibu Premier w/ ACC 2017
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 298: 8, 304: 1, 309: 8, 311: 8, 313: 8, 320: 3, 328: 1, 352: 5, 381: 6, 384: 4, 386: 8, 388: 8, 393: 7, 398: 8, 407: 7, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 455: 7, 456: 8, 479: 3, 481: 7, 485: 8, 487: 8, 489: 8, 495: 4, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 528: 5, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 565: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 810: 8, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1013: 3, 1017: 8, 1019: 2, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1223: 2, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1919: 7, 1930: 7, 2016: 8, 2024: 8,
}],
CAR.ACADIA: [
    # Acadia Denali w/ ACC 2018
{
190: 6, 193: 8, 197: 8, 199: 4, 201: 8, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 289: 8, 298: 8, 304: 1, 309: 8, 313: 8, 320: 3, 322: 7, 328: 1, 338: 6, 340: 6, 352: 5, 381: 8, 384: 4, 386: 8, 388: 8, 393: 8, 398: 8, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 454: 8, 455: 7, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 489: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 532: 6, 554: 3, 560: 8, 562: 8, 563: 5, 564: 5, 567: 5, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 753: 5, 761: 7, 840: 5, 842: 5, 844: 8, 866: 4, 869: 4, 880: 6, 961: 8, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1020: 8, 1033: 7, 1034: 7, 1105: 6, 1217: 8, 1221: 5, 1225: 8, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1328: 4, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1919: 7, 1920: 7, 1930: 7, 2016: 8, 2024: 8
}],
CAR.YUKON: [{
# Yukon Denali w/ ACC 2017
170: 8, 190: 6, 192: 5, 193: 8, 197: 8, 199: 4, 201: 6, 208: 8, 209: 7, 211: 2, 241: 6, 249: 8, 288: 5, 289: 1, 290: 1, 298: 8, 304: 8, 309: 8, 311: 8, 313: 8, 320: 8, 322: 7, 328: 1, 352: 7, 368: 8, 381: 6, 386: 8, 388: 8, 393: 7, 398: 8, 407: 4, 413: 8, 417: 7, 419: 1, 422: 4, 426: 7, 431: 8, 442: 8, 451: 8, 452: 8, 453: 6, 454: 8, 455: 7, 460: 5, 462: 4, 463: 3, 479: 3, 481: 7, 485: 8, 487: 8, 489: 5, 493: 8, 497: 8, 499: 3, 500: 6, 501: 8, 508: 8, 510: 8, 512: 3, 528: 5, 530: 8, 532: 6, 534: 2, 536: 5, 562: 8, 563: 5, 564: 5, 570: 1, 573: 1, 577: 8, 608: 8, 609: 6, 610: 6, 611: 6, 612: 8, 613: 8, 647: 6, 707: 8, 715: 8, 717: 5, 761: 7, 789: 5, 800: 6, 801: 8, 803: 8, 804: 3, 810: 8, 832: 8, 840: 5, 842: 6, 844: 8, 848: 4, 866: 4, 869: 4, 880: 6, 961: 8, 967: 4, 969: 8, 977: 8, 979: 8, 985: 5, 1001: 8, 1005: 6, 1009: 8, 1017: 8, 1019: 2, 1020: 8, 1105: 6, 1217: 8, 1221: 5, 1223: 2, 1225: 7, 1233: 8, 1249: 8, 1257: 6, 1265: 8, 1267: 1, 1280: 4, 1296: 4, 1300: 8, 1322: 6, 1323: 4, 1328: 4, 1417: 8, 1601: 8, 1906: 7, 1907: 7, 1912: 7, 1914: 7, 1915: 7, 1918: 7, 1919: 7, 1920: 7, 1925: 7, 2016: 8, 2024: 8
}],
}
STEER_THRESHOLD = 1.0
class ECU:
CAM = 0
ECU_FINGERPRINT = {
#ECU.CAM: [384, 715] # 384 = "ASCMLKASteeringCmd", 715 = "ASCMGasRegenCmd"
# MSA
ECU.CAM: [384] # 384 = "ASCMLKASteeringCmd", 715 = "ASCMGasRegenCmd"
}
DBC = {
CAR.HOLDEN_ASTRA: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.VOLT: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.MALIBU: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.ACADIA: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.CADILLAC_ATS: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.BUICK_REGAL: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
CAR.CADILLAC_CT6: dbc_dict('cadillac_ct6_powertrain', 'cadillac_ct6_object', chassis_dbc='cadillac_ct6_chassis'),
CAR.YUKON: dbc_dict('gm_global_a_powertrain', 'gm_global_a_object', chassis_dbc='gm_global_a_chassis'),
}
| 111.389381
| 1,265
| 0.559943
|
c61ab811738772f57059cce027a2c76c5a307c87
| 1,246
|
py
|
Python
|
azure-devops/azext_devops/devops_sdk/v5_1/customer_intelligence/models.py
|
keithlemon/azure-devops-cli-extension
|
4989e5f53650f186e638ccc186605986c76d59bf
|
[
"MIT"
] | 326
|
2019-04-10T12:38:23.000Z
|
2022-03-31T23:07:49.000Z
|
azure-devops/azext_devops/devops_sdk/v5_1/customer_intelligence/models.py
|
keithlemon/azure-devops-cli-extension
|
4989e5f53650f186e638ccc186605986c76d59bf
|
[
"MIT"
] | 562
|
2019-04-10T07:36:12.000Z
|
2022-03-28T07:37:54.000Z
|
azure-devops/azext_devops/devops_sdk/v5_1/customer_intelligence/models.py
|
keithlemon/azure-devops-cli-extension
|
4989e5f53650f186e638ccc186605986c76d59bf
|
[
"MIT"
] | 166
|
2019-04-10T07:59:40.000Z
|
2022-03-16T14:17:13.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class CustomerIntelligenceEvent(Model):
"""
:param area:
:type area: str
:param feature:
:type feature: str
:param properties:
:type properties: dict
"""
_attribute_map = {
'area': {'key': 'area', 'type': 'str'},
'feature': {'key': 'feature', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{object}'}
}
def __init__(self, area=None, feature=None, properties=None):
super(CustomerIntelligenceEvent, self).__init__()
self.area = area
self.feature = feature
self.properties = properties
__all__ = [
'CustomerIntelligenceEvent',
]
| 32.789474
| 95
| 0.501605
|
a5f87d0dd4cdc7ca57c5c6795a22fb2b97eb3b7f
| 4,429
|
py
|
Python
|
package_control/versions.py
|
William-Cao/Less-
|
b4cb03d2457bcac29ba3cfc13689098aee103971
|
[
"Unlicense",
"MIT"
] | 3
|
2019-06-06T00:13:44.000Z
|
2020-08-16T20:11:13.000Z
|
package_control/versions.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | null | null | null |
package_control/versions.py
|
Allyn69/package_control
|
f78578ed67529e263fb1f4e4f90f92295830560f
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-26T00:35:53.000Z
|
2021-07-26T00:35:53.000Z
|
import re
from .semver import SemVer
from .console_write import console_write
def semver_compat(v):
"""
Converts a string version number into SemVer. If the version is based on
a date, converts to 0.0.1+yyyy.mm.dd.hh.mm.ss.
:param v:
A string, dict with 'version' key, or a SemVer object
:return:
A string that is a valid semantic version number
"""
if isinstance(v, SemVer):
# SemVer only defined __str__, not __unicode__, so we always use str()
return str(v)
    # Allow passing in a dict containing info about a package
if isinstance(v, dict):
if 'version' not in v:
return '0'
v = v['version']
# Trim v off of the front
v = re.sub('^v', '', v)
# We prepend 0 to all date-based version numbers so that developers
# may switch to explicit versioning from GitHub/BitBucket
# versioning based on commit dates.
#
    # When translating dates into semver, every segment of the date is moved
    # into the numeric build metadata, so the result looks like:
    # 0.0.1+yyyy.mm.dd.hh.mm.ss
date_match = re.match('(\d{4})\.(\d{2})\.(\d{2})\.(\d{2})\.(\d{2})\.(\d{2})$', v)
if date_match:
v = '0.0.1+%s.%s.%s.%s.%s.%s' % date_match.groups()
    # This handles versions that were valid pre-semver with 4+ dotted
    # groups, such as 1.6.9.0
four_plus_match = re.match('(\d+\.\d+\.\d+)[T\.](\d+(\.\d+)*)$', v)
if four_plus_match:
v = '%s+%s' % (four_plus_match.group(1), four_plus_match.group(2))
# Semver must have major, minor, patch
elif re.match('^\d+$', v):
v += '.0.0'
elif re.match('^\d+\.\d+$', v):
v += '.0'
return v
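# Illustrative results of semver_compat() above, traced from the regexes in
# the function body (the version strings are made-up examples):
#   semver_compat('v1.2')                -> '1.2.0'  (prefix stripped, patch added)
#   semver_compat('7')                   -> '7.0.0'
#   semver_compat('1.6.9.0')             -> '1.6.9+0' (extra groups become build metadata)
#   semver_compat('2016.01.02.03.04.05') -> '0.0.1+2016.01.02.03.04.05' (date-based)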
def version_comparable(string):
return SemVer(semver_compat(string))
def version_exclude_prerelease(versions):
"""
    Remove prerelease versions from a list of SemVer versions
:param versions:
The list of versions to filter
:return:
The list of versions with pre-releases removed
"""
output = []
for version in versions:
if SemVer(semver_compat(version)).prerelease is not None:
continue
output.append(version)
return output
def version_process(versions, filter_prefix):
"""
    Filter a list of versions down to those that are valid SemVers. If a
    prefix is provided, only match versions starting with the prefix, and
    split the prefix off of the returned version.
:param versions:
The list of versions to filter
:param filter_prefix:
Remove this prefix from the version before checking if it is a valid
SemVer. If this prefix is not present, skip the version.
:return:
A list of dicts, each of which has the keys "version" and "prefix"
"""
output = []
for version in versions:
prefix = ''
if filter_prefix:
if version[0:len(filter_prefix)] != filter_prefix:
continue
check_version = version[len(filter_prefix):]
prefix = filter_prefix
else:
check_version = re.sub('^v', '', version)
if check_version != version:
prefix = 'v'
if not SemVer.valid(check_version):
continue
output.append({'version': check_version, 'prefix': prefix})
return output
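# Illustrative call to version_process() above (made-up version strings; it
# is assumed here that SemVer.valid() accepts standard three-part versions):
#   version_process(['st1.0.0', 'v2.0.0', 'junk'], 'st')
#     -> [{'version': '1.0.0', 'prefix': 'st'}]   # entries without the prefix are skipped
#   version_process(['v2.0.0', 'junk'], None)
#     -> [{'version': '2.0.0', 'prefix': 'v'}]    # 'junk' is not a valid SemVer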
def version_sort(sortable, *fields, **kwargs):
"""
    Sorts a list of versions, or of dicts with a 'version' key. Can also
    sort secondarily by one or more other fields.
:param sortable:
The list to sort
:param *fields:
If sortable is a list of dicts, perform secondary sort via these fields,
in order
:param **kwargs:
Keyword args to pass on to sorted()
:return:
A copy of sortable that is sorted according to SemVer rules
"""
def _version_sort_key(item):
result = SemVer(semver_compat(item))
if fields:
values = [result]
for field in fields:
values.append(item[field])
result = tuple(values)
return result
try:
return sorted(sortable, key=_version_sort_key, **kwargs)
except (ValueError) as e:
console_write(
u'''
Error sorting versions - %s
''',
e
)
return []
| 28.031646
| 85
| 0.600813
|
7730053bcdd4204539a300d9a33cbed7e734f38f
| 12,813
|
py
|
Python
|
huaweicloud-sdk-cloudpipeline/huaweicloudsdkcloudpipeline/v2/model/template_view.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-cloudpipeline/huaweicloudsdkcloudpipeline/v2/model/template_view.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-cloudpipeline/huaweicloudsdkcloudpipeline/v2/model/template_view.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class TemplateView:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'template_id': 'str',
'template_name': 'str',
'template_type': 'str',
'template_url': 'str',
'user_id': 'str',
'user_name': 'str',
'domain_id': 'str',
'domain_name': 'str',
'is_build_in': 'bool',
'region': 'str',
'project_id': 'str',
'project_name': 'str',
'is_watch': 'bool',
'description': 'str',
'parameter': 'TemplateParam',
'flow': 'FlowItem',
'states': 'StateItem'
}
attribute_map = {
'template_id': 'template_id',
'template_name': 'template_name',
'template_type': 'template_type',
'template_url': 'template_url',
'user_id': 'user_id',
'user_name': 'user_name',
'domain_id': 'domain_id',
'domain_name': 'domain_name',
'is_build_in': 'is_build_in',
'region': 'region',
'project_id': 'project_id',
'project_name': 'project_name',
'is_watch': 'is_watch',
'description': 'description',
'parameter': 'parameter',
'flow': 'flow',
'states': 'states'
}
def __init__(self, template_id=None, template_name=None, template_type=None, template_url=None, user_id=None, user_name=None, domain_id=None, domain_name=None, is_build_in=None, region=None, project_id=None, project_name=None, is_watch=None, description=None, parameter=None, flow=None, states=None):
"""TemplateView - a model defined in huaweicloud sdk"""
self._template_id = None
self._template_name = None
self._template_type = None
self._template_url = None
self._user_id = None
self._user_name = None
self._domain_id = None
self._domain_name = None
self._is_build_in = None
self._region = None
self._project_id = None
self._project_name = None
self._is_watch = None
self._description = None
self._parameter = None
self._flow = None
self._states = None
self.discriminator = None
self.template_id = template_id
self.template_name = template_name
self.template_type = template_type
self.template_url = template_url
self.user_id = user_id
self.user_name = user_name
self.domain_id = domain_id
self.domain_name = domain_name
self.is_build_in = is_build_in
self.region = region
self.project_id = project_id
self.project_name = project_name
self.is_watch = is_watch
self.description = description
self.parameter = parameter
self.flow = flow
self.states = states
@property
def template_id(self):
"""Gets the template_id of this TemplateView.
模板ID
:return: The template_id of this TemplateView.
:rtype: str
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""Sets the template_id of this TemplateView.
模板ID
:param template_id: The template_id of this TemplateView.
:type: str
"""
self._template_id = template_id
@property
def template_name(self):
"""Gets the template_name of this TemplateView.
模板名字
:return: The template_name of this TemplateView.
:rtype: str
"""
return self._template_name
@template_name.setter
def template_name(self, template_name):
"""Sets the template_name of this TemplateView.
模板名字
:param template_name: The template_name of this TemplateView.
:type: str
"""
self._template_name = template_name
@property
def template_type(self):
"""Gets the template_type of this TemplateView.
模板类型
:return: The template_type of this TemplateView.
:rtype: str
"""
return self._template_type
@template_type.setter
def template_type(self, template_type):
"""Sets the template_type of this TemplateView.
模板类型
:param template_type: The template_type of this TemplateView.
:type: str
"""
self._template_type = template_type
@property
def template_url(self):
"""Gets the template_url of this TemplateView.
模板编辑URL
:return: The template_url of this TemplateView.
:rtype: str
"""
return self._template_url
@template_url.setter
def template_url(self, template_url):
"""Sets the template_url of this TemplateView.
模板编辑URL
:param template_url: The template_url of this TemplateView.
:type: str
"""
self._template_url = template_url
@property
def user_id(self):
"""Gets the user_id of this TemplateView.
用户ID
:return: The user_id of this TemplateView.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this TemplateView.
用户ID
:param user_id: The user_id of this TemplateView.
:type: str
"""
self._user_id = user_id
@property
def user_name(self):
"""Gets the user_name of this TemplateView.
用户名字
:return: The user_name of this TemplateView.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""Sets the user_name of this TemplateView.
用户名字
:param user_name: The user_name of this TemplateView.
:type: str
"""
self._user_name = user_name
@property
def domain_id(self):
"""Gets the domain_id of this TemplateView.
租户ID
:return: The domain_id of this TemplateView.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this TemplateView.
租户ID
:param domain_id: The domain_id of this TemplateView.
:type: str
"""
self._domain_id = domain_id
@property
def domain_name(self):
"""Gets the domain_name of this TemplateView.
租户名字
:return: The domain_name of this TemplateView.
:rtype: str
"""
return self._domain_name
@domain_name.setter
def domain_name(self, domain_name):
"""Sets the domain_name of this TemplateView.
租户名字
:param domain_name: The domain_name of this TemplateView.
:type: str
"""
self._domain_name = domain_name
@property
def is_build_in(self):
"""Gets the is_build_in of this TemplateView.
是否内置模板
:return: The is_build_in of this TemplateView.
:rtype: bool
"""
return self._is_build_in
@is_build_in.setter
def is_build_in(self, is_build_in):
"""Sets the is_build_in of this TemplateView.
是否内置模板
:param is_build_in: The is_build_in of this TemplateView.
:type: bool
"""
self._is_build_in = is_build_in
@property
def region(self):
"""Gets the region of this TemplateView.
region
:return: The region of this TemplateView.
:rtype: str
"""
return self._region
@region.setter
def region(self, region):
"""Sets the region of this TemplateView.
region
:param region: The region of this TemplateView.
:type: str
"""
self._region = region
@property
def project_id(self):
"""Gets the project_id of this TemplateView.
项目ID
:return: The project_id of this TemplateView.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this TemplateView.
项目ID
:param project_id: The project_id of this TemplateView.
:type: str
"""
self._project_id = project_id
@property
def project_name(self):
"""Gets the project_name of this TemplateView.
项目名字
:return: The project_name of this TemplateView.
:rtype: str
"""
return self._project_name
@project_name.setter
def project_name(self, project_name):
"""Sets the project_name of this TemplateView.
项目名字
:param project_name: The project_name of this TemplateView.
:type: str
"""
self._project_name = project_name
@property
def is_watch(self):
"""Gets the is_watch of this TemplateView.
是否关注
:return: The is_watch of this TemplateView.
:rtype: bool
"""
return self._is_watch
@is_watch.setter
def is_watch(self, is_watch):
"""Sets the is_watch of this TemplateView.
是否关注
:param is_watch: The is_watch of this TemplateView.
:type: bool
"""
self._is_watch = is_watch
@property
def description(self):
"""Gets the description of this TemplateView.
模板描述
:return: The description of this TemplateView.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this TemplateView.
模板描述
:param description: The description of this TemplateView.
:type: str
"""
self._description = description
@property
def parameter(self):
"""Gets the parameter of this TemplateView.
:return: The parameter of this TemplateView.
:rtype: TemplateParam
"""
return self._parameter
@parameter.setter
def parameter(self, parameter):
"""Sets the parameter of this TemplateView.
:param parameter: The parameter of this TemplateView.
:type: TemplateParam
"""
self._parameter = parameter
@property
def flow(self):
"""Gets the flow of this TemplateView.
:return: The flow of this TemplateView.
:rtype: FlowItem
"""
return self._flow
@flow.setter
def flow(self, flow):
"""Sets the flow of this TemplateView.
:param flow: The flow of this TemplateView.
:type: FlowItem
"""
self._flow = flow
@property
def states(self):
"""Gets the states of this TemplateView.
:return: The states of this TemplateView.
:rtype: StateItem
"""
return self._states
@states.setter
def states(self, states):
"""Sets the states of this TemplateView.
:param states: The states of this TemplateView.
:type: StateItem
"""
self._states = states
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateView):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 24.640385
| 304
| 0.579333
|
4d4ee63f17394cafaba0656cc86597c10da02852
| 5,854
|
py
|
Python
|
client/diagnose.py
|
brad999/nikita-client
|
e025bb57eb78a169ce6b1807c58788ba59c08933
|
[
"MIT"
] | 1
|
2015-05-31T18:48:47.000Z
|
2015-05-31T18:48:47.000Z
|
client/diagnose.py
|
brad999/nikita
|
e025bb57eb78a169ce6b1807c58788ba59c08933
|
[
"MIT"
] | 34
|
2015-04-09T01:48:43.000Z
|
2015-05-04T17:23:24.000Z
|
client/diagnose.py
|
brad999/nikita
|
e025bb57eb78a169ce6b1807c58788ba59c08933
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8-*-
import os
import sys
import time
import socket
import subprocess
import pkgutil
import logging
import pip.req
import nikitapath
if sys.version_info < (3, 3):
from distutils.spawn import find_executable
else:
from shutil import which as find_executable
logger = logging.getLogger(__name__)
def check_network_connection(server="www.google.com"):
"""
    Checks if nikita can connect to a network server.
Arguments:
server -- (optional) the server to connect with (Default:
"www.google.com")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking network connection to server '%s'...", server)
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(server)
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection((host, 80), 2)
except Exception:
logger.debug("Network connection not working")
return False
else:
logger.debug("Network connection working")
return True
def check_executable(executable):
"""
Checks if an executable exists in $PATH.
Arguments:
executable -- the name of the executable (e.g. "echo")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking executable '%s'...", executable)
executable_path = find_executable(executable)
found = executable_path is not None
if found:
logger.debug("Executable '%s' found: '%s'", executable,
executable_path)
else:
logger.debug("Executable '%s' not found", executable)
return found
def check_python_import(package_or_module):
"""
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking python import '%s'...", package_or_module)
loader = pkgutil.get_loader(package_or_module)
found = loader is not None
if found:
logger.debug("Python %s '%s' found: %r",
"package" if loader.is_package(package_or_module)
else "module", package_or_module, loader.get_filename())
else:
logger.debug("Python import '%s' not found", package_or_module)
return found
def get_pip_requirements(fname=os.path.join(nikitapath.LIB_PATH,
'requirements.txt')):
"""
    Gets the PIP requirements from a text file. If the file does not exist
or is not readable, it returns None
Arguments:
fname -- (optional) the requirement text file (Default:
"client/requirements.txt")
Returns:
A list of pip requirement objects or None
"""
logger = logging.getLogger(__name__)
if os.access(fname, os.R_OK):
reqs = list(pip.req.parse_requirements(fname))
logger.debug("Found %d PIP requirements in file '%s'", len(reqs),
fname)
return reqs
else:
logger.debug("PIP requirements file '%s' not found or not readable",
fname)
def get_git_revision():
"""
Gets the current git revision hash as hex string. If the git executable is
missing or git is unable to get the revision, None is returned
Returns:
A hex string or None
"""
logger = logging.getLogger(__name__)
if not check_executable('git'):
logger.warning("'git' command not found, git revision not detectable")
return None
output = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
if not output:
logger.warning("Couldn't detect git revision (not a git repository?)")
return None
return output
def run():
"""
Performs a series of checks against the system and writes the results to
the logging system.
Returns:
The number of failed checks as integer
"""
logger = logging.getLogger(__name__)
    # Set the log level of this module to at least INFO
loglvl = logger.getEffectiveLevel()
if loglvl == logging.NOTSET or loglvl > logging.INFO:
logger.setLevel(logging.INFO)
logger.info("Starting nikita diagnostic at %s" % time.strftime("%c"))
logger.info("Git revision: %r", get_git_revision())
failed_checks = 0
if not check_network_connection():
failed_checks += 1
for executable in ['phonetisaurus-g2p', 'espeak', 'say']:
if not check_executable(executable):
logger.warning("Executable '%s' is missing in $PATH", executable)
failed_checks += 1
for req in get_pip_requirements():
logger.debug("Checking PIP package '%s'...", req.name)
if not req.check_if_exists():
logger.warning("PIP package '%s' is missing", req.name)
failed_checks += 1
else:
logger.debug("PIP package '%s' found", req.name)
for fname in [os.path.join(nikitapath.APP_PATH, os.pardir, "phonetisaurus",
"g014b2b.fst")]:
logger.debug("Checking file '%s'...", fname)
if not os.access(fname, os.R_OK):
logger.warning("File '%s' is missing", fname)
failed_checks += 1
else:
logger.debug("File '%s' found", fname)
if not failed_checks:
logger.info("All checks passed")
else:
logger.info("%d checks failed" % failed_checks)
return failed_checks
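# Typical invocation, based on the __main__ block below: run this module
# directly from its directory (the exact path depends on your checkout),
# optionally with --debug for verbose logging, e.g.
#   python diagnose.py --debug
# Each check is logged and run() returns the number of failed checks.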
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout)
logger = logging.getLogger()
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
run()
| 30.175258
| 79
| 0.627947
|
5a6cc6bed39bb8978b1cad2ed760d88a2018ca77
| 920
|
py
|
Python
|
km_api/functional_tests/know_me/profile/profile_items/list_entries/test_sort_list_entries.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 4
|
2017-08-03T00:46:31.000Z
|
2018-11-06T03:32:32.000Z
|
km_api/functional_tests/know_me/profile/profile_items/list_entries/test_sort_list_entries.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 526
|
2017-06-27T18:13:59.000Z
|
2021-06-10T18:00:21.000Z
|
km_api/functional_tests/know_me/profile/profile_items/list_entries/test_sort_list_entries.py
|
knowmetools/km-api
|
e4b72484c42e88a6c0087c9b1d5fef240e66cbb0
|
[
"Apache-2.0"
] | 1
|
2017-07-10T19:46:27.000Z
|
2017-07-10T19:46:27.000Z
|
from rest_framework import status
def test_sort_list_entries(
api_client,
enable_premium_requirement,
profile_list_entry_factory,
user_factory,
):
"""
Premium users should be able to sort list entries with respect to
their parent profile item.
"""
password = "password"
user = user_factory(has_premium=True, password="password")
api_client.log_in(user.primary_email.email, password)
e1 = profile_list_entry_factory(
profile_item__topic__profile__km_user__user=user
)
e2 = profile_list_entry_factory(profile_item=e1.profile_item)
data = {"order": [e2.pk, e1.pk]}
url = f"/know-me/profile/profile-items/{e1.profile_item.pk}/list-entries/"
response = api_client.put(url, data)
assert response.status_code == status.HTTP_200_OK
sorted1, sorted2 = api_client.get(url).json()
assert [sorted1["id"], sorted2["id"]] == data["order"]
| 28.75
| 78
| 0.709783
|
c19c4316a774aa05c01d490fca3935bc7052d09d
| 2,317
|
py
|
Python
|
src/ferment_ng/scripts.py
|
ushacow/ferment
|
1ba77be38243f9c074c76bd7b901606f1077d8f3
|
[
"Apache-2.0"
] | 1
|
2020-08-19T20:45:07.000Z
|
2020-08-19T20:45:07.000Z
|
src/ferment_ng/scripts.py
|
ushacow/ferment
|
1ba77be38243f9c074c76bd7b901606f1077d8f3
|
[
"Apache-2.0"
] | null | null | null |
src/ferment_ng/scripts.py
|
ushacow/ferment
|
1ba77be38243f9c074c76bd7b901606f1077d8f3
|
[
"Apache-2.0"
] | 1
|
2019-02-13T15:29:46.000Z
|
2019-02-13T15:29:46.000Z
|
import click
import docker
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.ext.code import CodeExtension
from wheezy.template.loader import DictLoader
from . import templates
import logging
LOG = logging.getLogger(__name__)
LOG_LEVELS = {
"info": logging.INFO,
"warn": logging.WARN,
"debug": logging.DEBUG,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
class Context(dict):
def __init__(self, *args, **kwargs):
self.__dict__ = self
super(Context, self).__init__(*args, **kwargs)
class FermConfig(object):
def __init__(self, path):
self.path = path
template_dct = {
'docker': templates.docker,
}
engine = Engine(
loader=DictLoader(template_dct),
extensions=[
CoreExtension(),
CodeExtension()
]
)
self.templates = {
name: engine.get_template(name) for name in template_dct
}
def get_config(self, config):
return self.templates['docker'].render(config)
@click.group()
@click.option(
"--log-level",
type=click.Choice([k for k, v in sorted(LOG_LEVELS.iteritems(), key=lambda x: x[1])]),
default="info",
help="Logging level.")
@click.pass_context
def run(ctx, log_level):
logging.basicConfig(level=LOG_LEVELS[log_level])
ctx.obj = Context()
@run.group("docker")
@click.option(
"api", "--docker", "-d",
type=click.Path(),
default="unix://var/run/docker.sock",
help="The docker api socket."
)
@click.option(
"--cidr", "-c", default="172.18.0.0/16",
help="Docker CIDR."
)
@click.option(
"--interface", "-i", default="docker0",
help="Docker interface."
)
@click.pass_context
def docker_grp(ctx, api, cidr, interface):
ctx.obj.client = docker.Client(base_url=api)
ctx.obj.cidr = cidr
ctx.obj.interface = interface
@docker_grp.command(name="config")
@click.pass_context
def docker_config(ctx):
ferm = FermConfig(None)
# get all containers
containers = ctx.obj.client.containers()
ctx.obj.containers = [
ctx.obj.client.inspect_container(container['Id'])
for container in containers
]
click.echo(ferm.get_config(ctx.obj))
| 22.940594
| 90
| 0.637894
|
4f2911f123117d6e653c7f09d545d8f115019bc4
| 1,081
|
py
|
Python
|
utils.py
|
karush17/esac
|
9a17d5a6bcff25ed5799e122e59aaedc696e11ac
|
[
"MIT"
] | 22
|
2020-06-29T03:06:02.000Z
|
2021-12-01T12:45:08.000Z
|
utils.py
|
karush17/Rough-Notebooks
|
9a17d5a6bcff25ed5799e122e59aaedc696e11ac
|
[
"MIT"
] | 3
|
2021-02-09T01:00:38.000Z
|
2021-10-05T16:49:48.000Z
|
utils.py
|
karush17/Rough-Notebooks
|
9a17d5a6bcff25ed5799e122e59aaedc696e11ac
|
[
"MIT"
] | 6
|
2020-06-29T03:06:04.000Z
|
2022-02-26T02:01:56.000Z
|
import math
import torch
def create_log_gaussian(mean, log_std, t):
quadratic = -((0.5 * (t - mean) / (log_std.exp())).pow(2))
l = mean.shape
log_z = log_std
z = l[-1] * math.log(2 * math.pi)
log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
return log_p
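# The helper below computes log(sum(exp(inputs))) along `dim` using the usual
# max-subtraction trick for numerical stability:
#   logsumexp(x) = max(x) + log(sum(exp(x - max(x))))
# so large-magnitude inputs do not overflow or underflow exp().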
def logsumexp(inputs, dim=None, keepdim=False):
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
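# soft_update() above is the standard Polyak averaging step used for target
# networks, theta_target <- (1 - tau) * theta_target + tau * theta_source,
# while hard_update() below copies the source parameters verbatim.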
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def dm_wrap(state, wrap=True):
if wrap==True:
state = state["observations"]
return state
| 30.885714
| 83
| 0.630897
|
687e130dc8b68600c36cc29b95fd12e6fa44eb37
| 1,282
|
py
|
Python
|
.Python Challenges - 101Computing/Fast Typing Test/interface.py
|
Gustavo-daCosta/Projetos
|
459cbf6fc0b67532c1bf2043ccdb915d16ba0df2
|
[
"MIT"
] | 2
|
2021-09-13T22:04:30.000Z
|
2022-01-05T14:01:43.000Z
|
.Python Challenges - 101Computing/Fast Typing Test/interface.py
|
Gustavo-daCosta/Projetos
|
459cbf6fc0b67532c1bf2043ccdb915d16ba0df2
|
[
"MIT"
] | null | null | null |
.Python Challenges - 101Computing/Fast Typing Test/interface.py
|
Gustavo-daCosta/Projetos
|
459cbf6fc0b67532c1bf2043ccdb915d16ba0df2
|
[
"MIT"
] | null | null | null |
from tkinter import *
import time
from os import system
from random import randint
app = Tk()
app.title("Fast Typing Test")
app.geometry("600x550")
def semComando():
print("Sem comando")
barra_menu = Menu(app)
menuHelp = Menu(barra_menu, tearoff=0)
menuHelp.add_command(label="How the app works?", command=semComando)
menuHelp.add_command(label="Pangram's list")
barra_menu.add_cascade(label="Help", menu=menuHelp)
menuAbout = Menu(barra_menu, tearoff=0)
menuAbout.add_command(label="Source Code", command=semComando)
menuAbout.add_command(label="Credits", command=semComando)
menuAbout.add_command(label="About", command=semComando)
app.config(menu=barra_menu)
titulo = Label(app, text="Fast Typing Test Challenge", font=("Helvetica", 20))
titulo.pack()
textoMenu = '''Level [1] - Type the Alphabet
Level [2] - Type the following quote: "The quick brown fox jumps over the lazy dog"
Level [3] - Type a random pangram of the list
— “The quick brown fox jumps over the lazy dog”
— “The five boxing wizards jump quickly”
— “Pack my box with five dozen liquor jugs”
— "The jay, pig, fox, zebra and my wolves quack!"
— "By Jove, my quick study of lexicography won a prize!"'''
Menu = Label(app, text=textoMenu, font=("Helvetica", 12))
Menu.place(x=10, y=40)
app.mainloop()
| 30.52381
| 83
| 0.74259
|
9993477c7ce93e69f346a448139f363536941d25
| 1,587
|
py
|
Python
|
src/olympia/constants/scanners.py
|
covariant/addons-server
|
41e6ee9e426facb19a1e1ca8d40277cb6f94a7da
|
[
"BSD-3-Clause"
] | 843
|
2016-02-09T13:00:37.000Z
|
2022-03-20T19:17:06.000Z
|
src/olympia/constants/scanners.py
|
covariant/addons-server
|
41e6ee9e426facb19a1e1ca8d40277cb6f94a7da
|
[
"BSD-3-Clause"
] | 10,187
|
2016-02-05T23:51:05.000Z
|
2022-03-31T15:24:44.000Z
|
src/olympia/constants/scanners.py
|
covariant/addons-server
|
41e6ee9e426facb19a1e1ca8d40277cb6f94a7da
|
[
"BSD-3-Clause"
] | 551
|
2016-02-08T20:32:16.000Z
|
2022-03-15T16:49:24.000Z
|
from django.utils.translation import gettext_lazy as _
CUSTOMS = 1
WAT = 2
YARA = 3
MAD = 4
SCANNERS = {CUSTOMS: 'customs', WAT: 'wat', YARA: 'yara', MAD: 'mad'}
# Action IDs are also used for severity (the higher, the more severe).
# The field is a PositiveSmallIntegerField, it should go up to 65535.
NO_ACTION = 1
FLAG_FOR_HUMAN_REVIEW = 20
DELAY_AUTO_APPROVAL = 100
DELAY_AUTO_APPROVAL_INDEFINITELY = 200
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT = 300
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT_FUTURE_APPROVALS = 400
ACTIONS = {
NO_ACTION: _('No action'),
FLAG_FOR_HUMAN_REVIEW: _('Flag for human review'),
DELAY_AUTO_APPROVAL: _('Delay auto-approval'),
DELAY_AUTO_APPROVAL_INDEFINITELY: _('Delay auto-approval indefinitely'),
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT: _(
'Delay auto-approval indefinitely and add restrictions'
),
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT_FUTURE_APPROVALS: _(
'Delay auto-approval indefinitely and add restrictions to future approvals'
),
}
UNKNOWN = None
TRUE_POSITIVE = 1
FALSE_POSITIVE = 2
INCONCLUSIVE = 3
RESULT_STATES = {
UNKNOWN: _('Unknown'),
TRUE_POSITIVE: _('True positive'),
FALSE_POSITIVE: _('False positive'),
INCONCLUSIVE: _('Inconclusive'),
}
NEW = 1
RUNNING = 2
ABORTED = 3
COMPLETED = 4
ABORTING = 5
SCHEDULED = 6
QUERY_RULE_STATES = {
NEW: _('New'),
RUNNING: _('Running'),
ABORTED: _('Aborted'),
ABORTING: _('Aborting'),
COMPLETED: _('Completed'),
SCHEDULED: _('Scheduled'),
}
LABEL_BAD = 'bad'
LABEL_GOOD = 'good'
| 25.190476
| 83
| 0.724008
|
655043a83e02a52b06eb32a34ff7c4ca7f09a3e4
| 1,505
|
py
|
Python
|
damage_network.py
|
addison-schwamb/WorkingMemoryModelingAdapted
|
47c0319b117152d7f88b8740593846b3aa06e7a3
|
[
"Apache-2.0"
] | null | null | null |
damage_network.py
|
addison-schwamb/WorkingMemoryModelingAdapted
|
47c0319b117152d7f88b8740593846b3aa06e7a3
|
[
"Apache-2.0"
] | null | null | null |
damage_network.py
|
addison-schwamb/WorkingMemoryModelingAdapted
|
47c0319b117152d7f88b8740593846b3aa06e7a3
|
[
"Apache-2.0"
] | null | null | null |
"""
Functions for damaging neural networks
written in Python 3.8.3
@ Addison Schwamb
"""
import numpy as np
import time
def remove_neurons(JT, pct_rmv, inhibitory):
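    """
    Randomly zero out roughly a fraction pct_rmv of the inhibitory (negative)
    entries of the connectivity matrix JT when inhibitory is True, otherwise
    the same fraction of the excitatory (positive) entries, and return the
    damaged matrix.
    """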
tic = time.time()
JT_dim = np.shape(JT)
if inhibitory:
remove = JT<0
else:
remove = JT>0
num_to_keep = ((1-pct_rmv)*(remove.sum())).astype(int)
keep_indices = np.random.randint(1,JT_dim[0],(2,num_to_keep))
num_kept = 0
for i in range(num_to_keep):
if remove[keep_indices[0][i]][keep_indices[1][i]]:
remove[keep_indices[0][i]][keep_indices[1][i]] = False
num_kept += 1
elif np.count_nonzero(remove[keep_indices[0][i]])>0:
j = (keep_indices[1][i]+1)%(JT_dim[0])
while not remove[keep_indices[0][i]][j]:
j = (j+1)%(JT_dim[0])
remove[keep_indices[0][i]][j] = False
num_kept += 1
elif np.count_nonzero(remove,axis=0)[keep_indices[1][i]]>0:
j = (keep_indices[0][i]+1)%(JT_dim[1])
while not remove[j][keep_indices[1][i]]:
j = (j+1)%(JT_dim[1])
remove[j][keep_indices[1][i]] = False
num_kept += 1
else:
keep_indices[0][i] = np.random.randint(1,JT_dim[0])
keep_indices[1][i] = np.random.randint(1,JT_dim[1])
i -= 1
JT[remove] = 0
toc = time.time()
print('time out: ', (toc-tic)/60)
print(np.size(JT))
print(num_to_keep)
print(num_kept)
return JT
| 31.354167
| 67
| 0.554817
|
caed021c66a6443cdd7f613de79ff95bb255cde5
| 3,138
|
py
|
Python
|
tests/ut/python/nn/test_psnr.py
|
ythlml/mindspore
|
028ae212624164044cfaa84f347fc502cb7fcb0f
|
[
"Apache-2.0"
] | 7
|
2020-05-24T03:19:26.000Z
|
2020-05-24T03:20:00.000Z
|
tests/ut/python/nn/test_psnr.py
|
ythlml/mindspore
|
028ae212624164044cfaa84f347fc502cb7fcb0f
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/nn/test_psnr.py
|
ythlml/mindspore
|
028ae212624164044cfaa84f347fc502cb7fcb0f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
test psnr
"""
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import _executor
class PSNRNet(nn.Cell):
def __init__(self, max_val=1.0):
super(PSNRNet, self).__init__()
self.net = nn.PSNR(max_val)
def construct(self, img1, img2):
return self.net(img1, img2)
def test_compile_psnr():
max_val = 1.0
net = PSNRNet(max_val)
img1 = Tensor(np.random.random((8, 3, 16, 16)))
img2 = Tensor(np.random.random((8, 3, 16, 16)))
_executor.compile(net, img1, img2)
def test_compile_psnr_grayscale():
max_val = 255
net = PSNRNet(max_val)
img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8))
_executor.compile(net, img1, img2)
def test_psnr_max_val_negative():
max_val = -1
with pytest.raises(ValueError):
net = PSNRNet(max_val)
def test_psnr_max_val_bool():
max_val = True
with pytest.raises(TypeError):
net = PSNRNet(max_val)
def test_psnr_max_val_zero():
max_val = 0
with pytest.raises(ValueError):
net = PSNRNet(max_val)
def test_psnr_different_shape():
shape_1 = (8, 3, 16, 16)
shape_2 = (8, 3, 8, 8)
img1 = Tensor(np.random.random(shape_1))
img2 = Tensor(np.random.random(shape_2))
net = PSNRNet()
with pytest.raises(ValueError):
_executor.compile(net, img1, img2)
def test_psnr_different_dtype():
dtype_1 = mstype.float32
dtype_2 = mstype.float16
img1 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_1)
img2 = Tensor(np.random.random((8, 3, 16, 16)), dtype=dtype_2)
net = PSNRNet()
with pytest.raises(TypeError):
_executor.compile(net, img1, img2)
def test_psnr_invalid_5d_input():
shape_1 = (8, 3, 16, 16)
shape_2 = (8, 3, 8, 8)
invalid_shape = (8, 3, 16, 16, 1)
img1 = Tensor(np.random.random(shape_1))
invalid_img1 = Tensor(np.random.random(invalid_shape))
img2 = Tensor(np.random.random(shape_2))
invalid_img2 = Tensor(np.random.random(invalid_shape))
net = PSNRNet()
with pytest.raises(ValueError):
_executor.compile(net, invalid_img1, img2)
with pytest.raises(ValueError):
_executor.compile(net, img1, invalid_img2)
with pytest.raises(ValueError):
_executor.compile(net, invalid_img1, invalid_img2)
| 29.603774
| 78
| 0.666348
|
a949b74a92c19a5bac0fce2b81e52ac95a06605b
| 1,436
|
py
|
Python
|
rcaudio/simple_recorder.py
|
selenakkaya/MushroomGame
|
59ecfddb536929cd137198bdab01e9ffab33cc5f
|
[
"MIT"
] | 31
|
2018-09-27T03:35:06.000Z
|
2022-01-11T09:49:26.000Z
|
rcaudio/simple_recorder.py
|
selenakkaya/MushroomGame
|
59ecfddb536929cd137198bdab01e9ffab33cc5f
|
[
"MIT"
] | 3
|
2018-11-20T07:49:24.000Z
|
2021-01-06T11:48:41.000Z
|
rcaudio/simple_recorder.py
|
selenakkaya/MushroomGame
|
59ecfddb536929cd137198bdab01e9ffab33cc5f
|
[
"MIT"
] | 7
|
2019-04-23T06:32:23.000Z
|
2020-09-25T14:18:32.000Z
|
from .core_recorder import CoreRecorder
import threading
import logging
import time
class SimpleRecorder(threading.Thread):
def __init__(self,
sr = 2000, #Sample Rate
):
threading.Thread.__init__(self)
self.audio_data = []
self.audio_lock = threading.Lock()
self.logger = logging.getLogger(__name__+".SimpleWatcher")
self.recorder = CoreRecorder(sr = sr)
self.analyzers = []
self.sr = sr
self.start_time = None
self.__running = threading.Event()
self.__running.set()
def register(self,analyzer):
self.analyzers.append(analyzer)
analyzer.register_recorder(self)
def run(self):
self.recorder.start()
for analyzer in self.analyzers:
analyzer.start()
while self.__running.isSet():
self.start_time = self.recorder.start_time
if self.start_time is not None:
break
time.sleep(.05)
while self.__running.isSet():
while not self.recorder.buffer.empty():
v = self.recorder.buffer.get()
self.audio_data.append(v)
for analyzer in self.analyzers:
analyzer.stop()
analyzer.join()
self.recorder.stop()
self.recorder.join()
def stop(self):
self.logger.warn("Stop Signal Received!")
self.__running.clear()
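# Minimal usage sketch (the analyzer class name here is hypothetical; any
# object providing register_recorder(), start(), stop() and join() fits the
# register() contract above):
#   recorder = SimpleRecorder(sr=2000)
#   recorder.register(SomeAnalyzer())
#   recorder.start()
#   ...                      # audio accumulates in recorder.audio_data
#   recorder.stop()
#   recorder.join()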
| 28.156863
| 66
| 0.590529
|
93f2fa0168f4bec25485db2bc7a869c01979430b
| 2,286
|
py
|
Python
|
scripts/testing/gateway-responses/test-sender.py
|
jcjveraa/EDDN
|
d0cbae6b7a2cac180dd414cbc324c2d84c867cd8
|
[
"BSD-3-Clause"
] | 100
|
2017-07-19T10:11:04.000Z
|
2020-07-05T22:07:39.000Z
|
scripts/testing/gateway-responses/test-sender.py
|
jcjveraa/EDDN
|
d0cbae6b7a2cac180dd414cbc324c2d84c867cd8
|
[
"BSD-3-Clause"
] | 24
|
2017-07-03T22:30:32.000Z
|
2020-07-04T21:04:30.000Z
|
scripts/testing/gateway-responses/test-sender.py
|
jcjveraa/EDDN
|
d0cbae6b7a2cac180dd414cbc324c2d84c867cd8
|
[
"BSD-3-Clause"
] | 23
|
2017-08-08T22:57:16.000Z
|
2020-06-26T06:19:25.000Z
|
#!/usr/bin/env python3
# vim: tabstop=4 shiftwidth=4 expandtab smarttab textwidth=0 wrapmargin=0
import argparse
import requests
import zlib
upload_url = 'https://dev.eddn.edcd.io:4432/upload/'
def send_message(url, args):
print(f'''
send_message:
URL: {url}
input file: "{args.messagefile}"
''')
with open(args.messagefile, 'r') as f:
msg = f.read()
if args.formdata:
if args.formdata == 'good':
msg = 'data=' + msg
elif args.formdata == 'bad':
msg = 'BADLYENCODED=' + msg
s = requests.Session()
if args.gzip:
# We assume that the argparse setup is enforcing the value being
# valid, i.e. `'good'` if it's not `'bad'`.
msg = zlib.compress(msg.encode('utf-8'))
s.headers['Content-Encoding'] = 'gzip'
if args.gzip == 'bad':
# Prepend a character so it's not a valid gzip header
msg = b'w' + msg
r = s.post(upload_url, data=msg)
print(f'Response: {r!r}')
print(f'Body: {r.content.decode()}')
if __name__ == "__main__":
__parser = argparse.ArgumentParser(
description='Send test messages to an EDDN /upload/ endpoint',
)
__parser.add_argument(
'--url',
metavar='<full URL of /upload/ endpoint>',
help='The full URL of an EDDN /upload/ endpoint',
)
__parser.add_argument(
'--formdata',
choices=('good', 'bad'),
help='Specify to form-encode the request body',
)
__parser.add_argument(
'--gzip',
choices=('good', 'bad'),
help='Specify to gzip-compress the request body',
)
__parser.add_argument(
'messagefile',
metavar='<input file name>',
help='Name of a file containing the body of the EDDN message to be sent',
)
args = __parser.parse_args()
if args.url:
# Allow for some short aliases, but NOT!!! for live !!!
if args.url == 'beta':
upload_url = 'https://beta.eddn.edcd.io:4431/upload/'
elif args.url == 'dev':
upload_url = 'https://dev.eddn.edcd.io:4432/upload/'
else:
upload_url = args.url
send_message(upload_url, args)
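# Illustrative invocations (the message file name is hypothetical):
#   python3 test-sender.py journal.json
#   python3 test-sender.py --url dev --gzip good journal.json
#   python3 test-sender.py --formdata bad journal.json   # exercise an error path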
| 26.275862
| 81
| 0.56168
|
9c7f0b86d23252eb667a628a9f5a6c0fd2927da0
| 5,634
|
py
|
Python
|
aiida_quantumespresso/calculations/matdyn.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/calculations/matdyn.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/calculations/matdyn.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from aiida.common.utils import classproperty
from aiida.orm.data.folder import FolderData
from aiida.orm.data.remote import RemoteData
from aiida.orm.data.array.kpoints import KpointsData
from aiida_quantumespresso.calculations.namelists import NamelistsCalculation
from aiida_quantumespresso.calculations.q2r import Q2rCalculation
class MatdynCalculation(NamelistsCalculation):
"""
    Plugin for the matdyn.x code of the Quantum ESPRESSO distribution, used to obtain the
phonon frequencies in reciprocal space from the interatomic force constants given by q2r.
For more information, refer to http://www.quantum-espresso.org/
"""
def _init_internal_params(self):
super(MatdynCalculation, self)._init_internal_params()
self._PHONON_FREQUENCIES_NAME = 'phonon_frequencies.dat'
self._PHONON_MODES_NAME = 'phonon_displacements.dat'
self._PHONON_DOS_NAME = 'phonon_dos.dat'
self._default_namelists = ['INPUT']
self._blocked_keywords = [('INPUT','flfrq',self._PHONON_FREQUENCIES_NAME), # output freq.
('INPUT','flvec',self._PHONON_MODES_NAME), # output displ.
('INPUT','fldos',self._PHONON_DOS_NAME), # output dos
('INPUT','q_in_cryst_coord',True), # kpoints always in crystal coordinates
# this is dynamically added in the _prepare_for_submission
#('INPUT','flfrc',Q2rCalculation.FORCE_CONSTANTS_NAME), # input
]
self._internal_retrieve_list = [self._PHONON_FREQUENCIES_NAME,
self._PHONON_DOS_NAME]
# Default Matdyn output parser provided by AiiDA
self._default_parser = 'quantumespresso.matdyn'
@classproperty
def _use_methods(cls):
"""
Use_* methods for the matdyn class.
"""
retdict = NamelistsCalculation._use_methods
retdict.update({
"kpoints": {
'valid_types': (KpointsData),
'additional_parameter': None,
'linkname': 'kpoints',
'docstring': ("Kpoints on which to calculate the phonon "
"frequencies"),
},
})
return retdict
def use_parent_calculation(self,calc):
"""
Set the parent calculation,
        from which it will inherit the output subfolder.
        The link will be created from the parent's force constants output to this NamelistsCalculation.
"""
if not isinstance(calc,Q2rCalculation):
raise ValueError("Parent calculation must be a Q2rCalculation")
from aiida.common.exceptions import UniquenessError
localdatas = [_[1] for _ in calc.get_outputs(also_labels=True)
if _[0] == calc.get_linkname_force_matrix()]
if len(localdatas) == 0:
raise UniquenessError("No output retrieved data found in the parent "
"calc, probably it did not finish yet, "
"or it crashed")
if len(localdatas) != 1:
raise UniquenessError("More than one output retrieved data found")
localdata = localdatas[0]
self.use_parent_folder(localdata)
def _get_following_text(self, inputdict, settings):
"""
Add the kpoints after the namelist.
This function should consume the content of inputdict (if it requires
a different node) or the keys inside settings, using the 'pop' method,
so that inputdict and settings should remain empty at the end of
_prepare_for_submission, if all flags/nodes were recognized
"""
from aiida.common.exceptions import InputValidationError
try:
kpoints = inputdict.pop(self.get_linkname('kpoints'))
except KeyError:
raise InputValidationError("No kpoints specified for this calculation")
if not isinstance(kpoints, KpointsData):
raise InputValidationError("kpoints is not of type KpointsData")
try:
klist = kpoints.get_kpoints()
except AttributeError:
klist = kpoints.get_kpoints_mesh(print_list=True)
retlist = ["{}".format(len(klist))]
for k in klist:
retlist.append("{:18.10f} {:18.10f} {:18.10f}".format(*k))
return "\n".join(retlist)+"\n"
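    # Illustration only (not part of the original plugin): for two k-points the
    # text appended after the namelist would look roughly like
    #   2
    #       0.0000000000       0.0000000000       0.0000000000
    #       0.5000000000       0.5000000000       0.5000000000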
def _prepare_for_submission(self,tempfolder, inputdict):
from aiida.orm.data.singlefile import SinglefileData
parent_calc_folder = inputdict.get(self.get_linkname('parent_folder'),
None)
if isinstance(parent_calc_folder, SinglefileData):
self._blocked_keywords.append(
('INPUT', 'flfrc', os.path.split(
parent_calc_folder.get_file_abs_path())[1] ))
else:
raise NotImplementedError(
"Input different from SinglefileData is not supported"
" yet for MatdynCalculation; it is {}".format(
type(parent_calc_folder)))
self._blocked_keywords.append(
('INPUT', 'flfrc', Q2rCalculation._FORCE_CONSTANTS_NAME ))
calcinfo = super(MatdynCalculation, self)._prepare_for_submission(
tempfolder, inputdict)
return calcinfo
| 43.674419
| 108
| 0.599397
|
a17a354f9959537b8175591c7f74e6c0a1a4108f
| 9,484
|
py
|
Python
|
matdeeplearn/models/dospredict.py
|
vxfung/MatDeepLearn_DOS
|
4c2998bacfe5627958bc792ea62ec2baab2507a1
|
[
"MIT"
] | 1
|
2022-03-28T13:09:49.000Z
|
2022-03-28T13:09:49.000Z
|
matdeeplearn/models/dospredict.py
|
vxfung/MatDeepLearn_DOS
|
4c2998bacfe5627958bc792ea62ec2baab2507a1
|
[
"MIT"
] | null | null | null |
matdeeplearn/models/dospredict.py
|
vxfung/MatDeepLearn_DOS
|
4c2998bacfe5627958bc792ea62ec2baab2507a1
|
[
"MIT"
] | null | null | null |
from typing import Union, Tuple
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import Sequential, Linear, BatchNorm1d, PReLU
import torch_geometric
from torch_geometric.typing import PairTensor, Adj, OptTensor, Size
from torch_geometric.nn.conv import MessagePassing
from torch_scatter import scatter_mean, scatter_add, scatter_max, scatter
from torch_geometric.nn import (
Set2Set,
global_mean_pool,
global_add_pool,
global_max_pool,
CGConv,
)
#GNN model
class DOSpredict(torch.nn.Module):
def __init__(
self,
data,
dim1=64,
dim2=64,
pre_fc_count=1,
gc_count=3,
batch_norm="True",
batch_track_stats="True",
dropout_rate=0.0,
**kwargs
):
super(DOSpredict, self).__init__()
if batch_track_stats == "False":
self.batch_track_stats = False
else:
self.batch_track_stats = True
self.batch_norm = batch_norm
self.dropout_rate = dropout_rate
        ##Determine gc dimension
assert gc_count > 0, "Need at least 1 GC layer"
if pre_fc_count == 0:
self.gc_dim = data.num_features
else:
self.gc_dim = dim1
##Determine post_fc dimension
if pre_fc_count == 0:
post_fc_dim = data.num_features
else:
post_fc_dim = dim1
##Determine output dimension length
if data[0].y.ndim == 0:
output_dim = 1
else:
output_dim = len(data[0].y[0])
##Set up pre-GNN dense layers
if pre_fc_count > 0:
self.pre_lin_list = torch.nn.ModuleList()
for i in range(pre_fc_count):
if i == 0:
lin = Sequential(torch.nn.Linear(data.num_features, dim1), torch.nn.PReLU())
self.pre_lin_list.append(lin)
else:
lin = Sequential(torch.nn.Linear(dim1, dim1), torch.nn.PReLU())
self.pre_lin_list.append(lin)
elif pre_fc_count == 0:
self.pre_lin_list = torch.nn.ModuleList()
##Set up GNN layers
self.conv_list = torch.nn.ModuleList()
self.bn_list = torch.nn.ModuleList()
for i in range(gc_count):
conv = GC_block(self.gc_dim, data.num_edge_features, aggr="mean")
#conv = CGConv(self.gc_dim, data.num_edge_features, aggr="mean", batch_norm=False)
self.conv_list.append(conv)
if self.batch_norm == "True":
bn = BatchNorm1d(self.gc_dim, track_running_stats=self.batch_track_stats, affine=True)
self.bn_list.append(bn)
self.dos_mlp = Sequential(Linear(post_fc_dim, dim2),
torch.nn.PReLU(),
Linear(dim2, output_dim),
torch.nn.PReLU(),
)
self.scaling_mlp = Sequential(Linear(post_fc_dim, dim2),
torch.nn.PReLU(),
Linear(dim2, 1),
)
def forward(self, data):
##Pre-GNN dense layers
for i in range(0, len(self.pre_lin_list)):
if i == 0:
out = self.pre_lin_list[i](data.x)
else:
out = self.pre_lin_list[i](out)
##GNN layers
for i in range(0, len(self.conv_list)):
if len(self.pre_lin_list) == 0 and i == 0:
if self.batch_norm == "True":
out = self.conv_list[i](data.x, data.edge_index, data.edge_attr)
out = self.bn_list[i](out)
else:
out = self.conv_list[i](data.x, data.edge_index, data.edge_attr)
else:
if self.batch_norm == "True":
out = self.conv_list[i](out, data.edge_index, data.edge_attr)
out = self.bn_list[i](out)
else:
out = self.conv_list[i](out, data.edge_index, data.edge_attr)
out = F.dropout(out, p=self.dropout_rate, training=self.training)
##Post-GNN dense layers
dos_out = self.dos_mlp(out)
scaling = self.scaling_mlp(out)
if dos_out.shape[1] == 1:
return dos_out.view(-1), scaling.view(-1)
else:
return dos_out, scaling.view(-1)
# Smooth Overlap of Atomic Positions with neural network
class SOAP_DOS(torch.nn.Module):
def __init__(self, data, dim1, fc_count, **kwargs):
super(SOAP_DOS, self).__init__()
if data[0].y.ndim == 0:
output_dim = 1
else:
output_dim = len(data[0].y[0])
self.lin1 = torch.nn.Linear(data[0].extra_features_SOAP.shape[1], dim1)
self.lin_list_dos = torch.nn.ModuleList(
[torch.nn.Linear(dim1, dim1) for i in range(fc_count)]
)
self.lin_out_dos = torch.nn.Linear(dim1, output_dim)
self.lin_list_scaling = torch.nn.ModuleList(
[torch.nn.Linear(dim1, dim1) for i in range(fc_count)]
)
self.lin_out_scaling = torch.nn.Linear(dim1, 1)
def forward(self, data):
dos_out = F.relu(self.lin1(data.extra_features_SOAP))
scaling = F.relu(self.lin1(data.extra_features_SOAP))
for layer in self.lin_list_dos:
dos_out = F.relu(layer(dos_out))
dos_out = self.lin_out_dos(dos_out)
for layer in self.lin_list_scaling:
scaling = F.relu(layer(scaling))
scaling = self.lin_out_scaling(scaling)
if dos_out.shape[1] == 1:
return dos_out.view(-1), scaling.view(-1)
else:
return dos_out, scaling.view(-1)
# Local Many Body Tensor with neural network
class LMBTR_DOS(torch.nn.Module):
def __init__(self, data, dim1, fc_count, **kwargs):
super(LMBTR_DOS, self).__init__()
if data[0].y.ndim == 0:
output_dim = 1
else:
output_dim = len(data[0].y[0])
self.lin1 = torch.nn.Linear(data[0].extra_features_LMBTR.shape[1], dim1)
self.lin_list_dos = torch.nn.ModuleList(
[torch.nn.Linear(dim1, dim1) for i in range(fc_count)]
)
self.lin_out_dos = torch.nn.Linear(dim1, output_dim)
self.lin_list_scaling = torch.nn.ModuleList(
[torch.nn.Linear(dim1, dim1) for i in range(fc_count)]
)
self.lin_out_scaling = torch.nn.Linear(dim1, 1)
def forward(self, data):
dos_out = F.relu(self.lin1(data.extra_features_LMBTR))
scaling = F.relu(self.lin1(data.extra_features_LMBTR))
for layer in self.lin_list_dos:
dos_out = F.relu(layer(dos_out))
dos_out = self.lin_out_dos(dos_out)
for layer in self.lin_list_scaling:
scaling = F.relu(layer(scaling))
scaling = self.lin_out_scaling(scaling)
if dos_out.shape[1] == 1:
return dos_out.view(-1), scaling.view(-1)
else:
return dos_out, scaling.view(-1)
#Dummy model
class Dummy(torch.nn.Module):
def __init__(
self,
data,
**kwargs
):
super(Dummy, self).__init__()
self.lin = torch.nn.Linear(len(data[0].x[0]), len(data[0].y[0]))
def forward(self, data):
out = self.lin(data.x)*0
return out, torch.ones(out.shape[0]).to(out)
#####################################################
class GC_block(MessagePassing):
def __init__(self, channels: Union[int, Tuple[int, int]], dim: int = 0, aggr: str = 'mean', **kwargs):
super(GC_block, self).__init__(aggr=aggr, **kwargs)
self.channels = channels
self.dim = dim
if isinstance(channels, int):
channels = (channels, channels)
self.mlp = Sequential(Linear(sum(channels) + dim, channels[1]),
torch.nn.PReLU(),
)
self.mlp2 = Sequential(Linear(dim, dim),
torch.nn.PReLU(),
)
def forward(self, x: Union[Tensor, PairTensor], edge_index: Adj, edge_attr: OptTensor = None, size: Size = None) -> Tensor:
if isinstance(x, Tensor):
x: PairTensor = (x, x)
# propagate_type: (x: PairTensor, edge_attr: OptTensor)
out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
out += x[1]
return out
def message(self, x_i, x_j, edge_attr: OptTensor) -> Tensor:
z = torch.cat([x_i, x_j, self.mlp2(edge_attr)], dim=-1)
z = self.mlp(z)
return z
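# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the tensor shapes GC_block expects: node features with `channels`
# columns and edge features with `dim` columns; all sizes below are arbitrary.
if __name__ == "__main__":
    num_nodes, num_edges, channels, edge_dim = 4, 6, 8, 3
    x = torch.randn(num_nodes, channels)
    edge_index = torch.randint(0, num_nodes, (2, num_edges))
    edge_attr = torch.randn(num_edges, edge_dim)
    block = GC_block(channels, edge_dim, aggr="mean")
    out = block(x, edge_index, edge_attr)
    assert out.shape == (num_nodes, channels)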
| 36.198473
| 128
| 0.515816
|
c0c438e63d46941a8a0f5ad9e851353a8375dbc5
| 3,781
|
py
|
Python
|
_unittests/ut_finance/test_stock_file.py
|
mohamedelkansouli/Ensae_py2
|
e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a
|
[
"MIT"
] | null | null | null |
_unittests/ut_finance/test_stock_file.py
|
mohamedelkansouli/Ensae_py2
|
e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a
|
[
"MIT"
] | null | null | null |
_unittests/ut_finance/test_stock_file.py
|
mohamedelkansouli/Ensae_py2
|
e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a
|
[
"MIT"
] | null | null | null |
"""
@brief test log(time=3s)
"""
import sys
import os
import unittest
import datetime
import warnings
from quandl.errors.quandl_error import LimitExceededError
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyensae.finance.astock import StockPrices, StockPricesHTTPException
class TestStockFile(unittest.TestCase):
def test_save_stock_google(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_cache_file_google")
cache = temp
try:
stock = StockPrices("NASDAQ:MSFT", folder=cache,
end=datetime.datetime(2014, 1, 15), url="google")
except StockPricesHTTPException as e:
warnings.warn(str(e))
return
file = os.path.join(cache, "save.txt")
if os.path.exists(file):
os.remove(file)
stock.to_csv(file)
self.assertTrue(os.path.exists(file))
stock2 = StockPrices(file, sep="\t")
self.assertEqual(stock.dataframe.shape, stock2.dataframe.shape)
df = stock2.dataframe
file = os.path.join(cache, "out_excel.xlsx")
if os.path.exists(file):
os.remove(file)
df.to_excel(file)
self.assertTrue(os.path.exists(file))
def test_save_stock_quandl(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_cache_file_quandl")
cache = temp
try:
stock = StockPrices("EURONEXT/BNP", url="quandl", folder=cache,
end=datetime.datetime(2017, 1, 15))
except LimitExceededError:
warnings.warn(
"[test_save_stock_quandl] reached quandl free quota. Stop test.")
return
file = os.path.join(cache, "save.txt")
if os.path.exists(file):
os.remove(file)
stock.to_csv(file)
self.assertTrue(os.path.exists(file))
stock2 = StockPrices(file, sep="\t")
self.assertEqual(stock.dataframe.shape, stock2.dataframe.shape)
df = stock2.dataframe
file = os.path.join(cache, "out_excel.xlsx")
if os.path.exists(file):
os.remove(file)
df.to_excel(file)
self.assertTrue(os.path.exists(file))
def test_save_stock_yahoo_new(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_cache_file_yahoo")
cache = temp
stock = StockPrices(
"AAPL",
folder=cache,
url="yahoo_new",
end=datetime.datetime(
2014,
1,
15))
file = os.path.join(cache, "save.txt")
if os.path.exists(file):
os.remove(file)
stock.to_csv(file)
self.assertTrue(os.path.exists(file))
stock2 = StockPrices(file, sep="\t")
self.assertEqual(stock.dataframe.shape, stock2.dataframe.shape)
df = stock2.dataframe
file = os.path.join(cache, "out_excel.xlsx")
if os.path.exists(file):
os.remove(file)
df.to_excel(file)
self.assertTrue(os.path.exists(file))
if __name__ == "__main__":
unittest.main()
| 28.216418
| 81
| 0.579212
|
b42465cec5f949e4e3ada5be78ff768320a11910
| 11,286
|
py
|
Python
|
uweb3/connections.py
|
stefvanhouten/uweb3
|
bd6b03a3c819582afd845b31970b8a49312d1245
|
[
"ISC"
] | null | null | null |
uweb3/connections.py
|
stefvanhouten/uweb3
|
bd6b03a3c819582afd845b31970b8a49312d1245
|
[
"ISC"
] | 3
|
2020-05-20T09:53:14.000Z
|
2020-07-12T21:42:37.000Z
|
uweb3/connections.py
|
stefvanhouten/uweb3
|
bd6b03a3c819582afd845b31970b8a49312d1245
|
[
"ISC"
] | 3
|
2020-04-29T09:49:07.000Z
|
2020-12-03T09:51:22.000Z
|
#!/usr/bin/python
"""This file contains all the connection manager classes that interact with
databases, restful APIs, secure cookies, config files etc."""
__author__ = 'Jan Klopper (janunderdark.nl)'
__version__ = 0.1
import os
import sys
from base64 import b64encode
class ConnectionError(Exception):
  """Error class raised when an underlying connector throws an error on
  connecting."""
class ConnectionManager(object):
"""This is the connection manager object that is handled by all Model Objects.
It finds out which connection was requested by looking at the call stack, and
figuring out what database type the model class calling it belongs to.
Connected databases are stored and reused.
On delete, the databases are closed and any lingering transactions are
  committed to complete the database writes.
"""
DEFAULTCONNECTIONMANAGER = None
def __init__(self, config, options, debug):
self.__connectors = {} # classes
self.__connections = {} # instances
self.config = config
self.options = options
self.debug = debug
self.LoadDefaultConnectors()
def LoadDefaultConnectors(self):
self.RegisterConnector(SignedCookie)
self.RegisterConnector(Mysql, True)
self.RegisterConnector(Sqlite)
self.RegisterConnector(Mongo)
self.RegisterConnector(SqlAlchemy)
def RegisterConnector(self, classname, default=False):
"""Make the ConnectonManager aware of a new type of connector."""
if default:
self.DEFAULTCONNECTIONMANAGER = classname.Name()
self.__connectors[classname.Name()] = classname
  def RelevantConnection(self, level=2):
    """Returns the relevant database connection dependent on the caller model
class.
If the caller model cannot be determined, the 'relational' database
connection is returned as a fallback method.
Level indicates how many stack layers we should go up. Defaults to two.
"""
# Figure out caller type or instance
# pylint: disable=W0212
#TODO use inspect module instead, and iterate over frames
caller_locals = sys._getframe(level).f_locals
# pylint: enable=W0212
if 'self' in caller_locals:
caller_cls = type(caller_locals['self'])
else:
caller_cls = caller_locals.get('cls', type)
# Decide the type of connection to return for this caller
con_type = (caller_cls._CONNECTOR if hasattr(caller_cls, '_CONNECTOR') else
self.DEFAULTCONNECTIONMANAGER)
if (con_type in self.__connections and
hasattr(self.__connections[con_type], 'connection')):
return self.__connections[con_type].connection
else:
request = sys._getframe(3).f_locals['self'].req
try:
# instantiate a connection
self.__connections[con_type] = self.__connectors[con_type](
self.config, self.options, request, self.debug)
return self.__connections[con_type].connection
except KeyError:
raise TypeError('No connector for: %r, available: %r' % (con_type, self.__connectors))
def __enter__(self):
"""Proxies the transaction to the underlying relevant connection."""
return self.RelevantConnection().__enter__()
def __exit__(self, *args):
"""Proxies the transaction to the underlying relevant connection."""
return self.RelevantConnection().__exit__(*args)
def __getattr__(self, attribute):
return getattr(self.RelevantConnection(), attribute)
  def RollbackAll(self):
    """Performs a rollback on all connectors with pending commits."""
    if self.debug:
      print('Rolling back uncommitted transactions on all connectors.')
for classname in self.__connections:
try:
self.__connections[classname].Rollback()
except NotImplementedError:
pass
def PostRequest(self):
"""This cleans up any non persistent connections."""
cleanups = []
for classname in self.__connections:
if (hasattr(self.__connections[classname], 'PERSISTENT') and
not self.__connections[classname].PERSISTENT):
cleanups.append(classname)
for classname in cleanups:
try:
self.__connections[classname].Disconnect()
except (NotImplementedError, TypeError, ConnectionError):
pass
del(self.__connections[classname])
  def __iter__(self):
    """Pass through to the relevant connection as an iterable, so variable unpacking
can be used by the consuming class. This is used in the SecureCookie Model
class."""
return iter(self.RelevantConnection())
def __del__(self):
"""Cleans up all references, and closes all connectors"""
print('Deleting model connections.')
for classname in self.__connectors:
if not hasattr(self.__connectors[classname], 'connection'):
continue
try:
self.__connections[classname].Disconnect()
except (NotImplementedError, TypeError, ConnectionError):
pass
class Connector(object):
"""Base Connector class, subclass from this to create your own connectors.
Usually the name of your class is used to lookup its config in the
configuration file, or the database or local filename.
  Connectors based on this class are usually singletons. One global connection
is kept alive, and multiple model classes use it to connect to their
respective tables, cookies, or files.
"""
_NAME = None
@classmethod
  def Name(cls):
    """Returns the 'connector' name, which is usually used to look up its config
    in the config file.
    If this is not explicitly defined by the class constant `_NAME`, the return
value will be the class name with the first letter lowercased.
"""
if cls._NAME:
return cls._NAME
name = cls.__name__
return name[0].lower() + name[1:]
def Disconnect(self):
"""Standard interface to disconnect from data source"""
raise NotImplementedError
def Rollback(self):
"""Standard interface to rollback any pending commits"""
raise NotImplementedError
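# --- Hedged example (not part of the original module) -------------------------
# A minimal custom connector: subclass Connector, expose a `connection`
# attribute, and register it on a ConnectionManager instance with
# connection_manager.RegisterConnector(DummyStore). Model classes opt in by
# setting `_CONNECTOR = 'dummyStore'`. The in-memory dict is illustrative only.
class DummyStore(Connector):
  """Keeps data in a plain dict; useful only as an illustration."""
  PERSISTENT = False

  def __init__(self, config, options, request, debug=False):
    self.debug = debug
    self.connection = {}

  def Disconnect(self):
    self.connection = None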
class SignedCookie(Connector):
"""Adds a signed cookie connection to the connection manager object.
The name of the class is used as the Cookiename"""
PERSISTENT = False
def __init__(self, config, options, request, debug=False):
"""Sets up the local connection to the signed cookie store, and generates a
new secret key if no key can be found in the config"""
# Generating random seeds on uWeb3 startup or fetch from config
self.debug = debug
try:
self.options = options[self.Name()]
self.secure_cookie_secret = self.options['secret']
except KeyError:
secret = self.GenerateNewKey()
config.Create(self.Name(), 'secret', secret)
if self.debug:
print('SignedCookie: Wrote new secret random to config.')
self.secure_cookie_secret = secret
self.connection = (request, request.vars['cookie'], self.secure_cookie_secret)
@staticmethod
def GenerateNewKey(length=128):
return b64encode(os.urandom(length)).decode('utf-8')
class Mysql(Connector):
"""Adds MySQL support to connection manager object."""
def __init__(self, config, options, request, debug=False):
"""Returns a MySQL database connection."""
self.debug = debug
self.options = {'host': 'localhost',
'user': None,
'password': None,
'database': ''}
try:
from .libs.sqltalk import mysql
try:
self.options = options[self.Name()]
except KeyError:
pass
self.connection = mysql.Connect(
host=self.options.get('host', 'localhost'),
user=self.options.get('user'),
passwd=self.options.get('password'),
db=self.options.get('database'),
charset=self.options.get('charset', 'utf8'),
debug=self.debug)
except Exception as e:
raise ConnectionError('Connection to "%s" of type "%s" resulted in: %r' % (self.Name(), type(self), e))
def Rollback(self):
with self.connection as cursor:
return cursor.Execute("ROLLBACK")
def Disconnect(self):
"""Closes the MySQL connection."""
if self.debug:
print('%s closed connection to: %r' % (self.Name(), self.options.get('database')))
self.connection.close()
del(self.connection)
class Mongo(Connector):
"""Adds MongoDB support to connection manager object."""
def __init__(self, config, options, request, debug=False):
"""Returns a MongoDB database connection."""
self.debug = debug
import pymongo
self.options = options.get(self.Name(), {})
try:
self.connection = pymongo.connection.Connection(
host=self.options.get('host', 'localhost'),
port=self.options.get('port', 27017))
if 'database' in self.options:
self.connection = self.connection[self.options['database']]
except Exception as e:
raise ConnectionError('Connection to "%s" of type "%s" resulted in: %r' % (self.Name(), type(self), e))
def Disconnect(self):
"""Closes the Mongo connection."""
if self.debug:
print('%s closed connection to: %r' % (self.Name(), self.options.get('database', 'Unspecified')))
self.connection.close()
del(self.connection)
class SqlAlchemy(Connector):
"""Adds MysqlAlchemy connection to ConnectionManager."""
def __init__(self, config, options, request, debug=False):
"""Returns a Mysql database connection wrapped in a SQLAlchemy session."""
from sqlalchemy.orm import sessionmaker
self.debug = debug
self.options = {'host': 'localhost',
'user': None,
'password': None,
'database': ''}
try:
self.options = options[self.Name()]
except KeyError:
pass
Session = sessionmaker()
    Session.configure(bind=self.engine(), expire_on_commit=False)
try:
self.connection = Session()
except Exception as e:
raise ConnectionError('Connection to "%s" of type "%s" resulted in: %r' % (self.Name(), type(self), e))
def engine(self):
from sqlalchemy import create_engine
return create_engine('mysql://{username}:{password}@{host}/{database}'.format(
username=self.options.get('user'),
password=self.options.get('password'),
host=self.options.get('host', 'localhost'),
database=self.options.get('database')),
pool_size=5,
max_overflow=0,
encoding=self.options.get('charset', 'utf8'),)
class Sqlite(Connector):
"""Adds SQLite support to connection manager object."""
def __init__(self, config, options, request, debug=False):
"""Returns a SQLite database connection.
The name of the class is used as the local filename.
"""
from .libs.sqltalk import sqlite
self.debug = debug
self.options = options[self.Name()]
try:
self.connection = sqlite.Connect(self.options.get('database'))
except Exception as e:
raise ConnectionError('Connection to "%s" of type "%s" resulted in: %r' % (self.Name(), type(self), e))
  def Rollback(self):
    """Rolls back any uncommitted transactions."""
return self.connection.rollback()
def Disconnect(self):
"""Closes the SQLite connection."""
if self.debug:
print('%s closed connection to: %r' % (self.Name(), self.options.get('database')))
self.connection.close()
del(self.connection)
| 35.37931
| 109
| 0.683856
|
667c5e4bd9fd065108749849979bbfc8087b34aa
| 1,380
|
py
|
Python
|
python_serial_sender/sender.py
|
vvzen/arduino-python-serial-communication-example
|
6fb5175266f490acf49ac2ff1c119dfd0b2ee938
|
[
"MIT"
] | null | null | null |
python_serial_sender/sender.py
|
vvzen/arduino-python-serial-communication-example
|
6fb5175266f490acf49ac2ff1c119dfd0b2ee938
|
[
"MIT"
] | null | null | null |
python_serial_sender/sender.py
|
vvzen/arduino-python-serial-communication-example
|
6fb5175266f490acf49ac2ff1c119dfd0b2ee938
|
[
"MIT"
] | null | null | null |
import time
import serial
ARDUINO_URL = "/dev/tty.usbmodem142301"
MAX_WAIT = 32  # maximum read attempts (~seconds, since each read blocks up to 1 s)
ARDUINO_RESET_WAIT_TIME = 5 # seconds
END_MESSAGE_DELIMITER = b">"
def hello_world():
serial_device = serial.Serial(ARDUINO_URL, 9600, timeout=1)
print("Will talk to ", serial_device.name)
    # This is fundamental! Give the Arduino enough time to reset
    # before trying to write to and read from the serial port
print("Waiting %i seconds for arduino to reset.. " % ARDUINO_RESET_WAIT_TIME)
time.sleep(ARDUINO_RESET_WAIT_TIME)
hello_world_message = b"<MoveX10Y10>"
print("Sending ", hello_world_message.decode("ascii"))
serial_device.write(hello_world_message)
current_message = []
i = 0
while True:
current_char = serial_device.read(1)
current_message.append(current_char)
# print(current_char)
if current_char == END_MESSAGE_DELIMITER:
break
i += 1
if i > MAX_WAIT:
print("Closing communication, timeout was reached without "
"finding end of message ", END_MESSAGE_DELIMITER)
break
# Read as bytes, decode them into ascii
message_as_ascii = "".join([c.decode("ascii") for c in current_message])
print("Received ", message_as_ascii)
serial_device.close()
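# --- Hedged helper sketch (not part of the original script) -------------------
# The firmware is assumed to expect commands framed as ASCII wrapped in
# '<' ... '>', like the hard-coded b"<MoveX10Y10>" above. The command name and
# axis keywords below are examples only.
def build_message(command, **axes):
    """Return a framed command such as b'<MoveX10Y10>'."""
    body = command + "".join("%s%d" % (axis.upper(), value)
                             for axis, value in sorted(axes.items()))
    return ("<" + body + ">").encode("ascii")
# build_message("Move", x=10, y=10) == b"<MoveX10Y10>"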
def main():
hello_world()
if __name__ == '__main__':
main()
| 27.058824
| 81
| 0.671739
|
5f65efd5f67fbfc56de8446a2fa219df0cfc8e45
| 10,780
|
py
|
Python
|
boundlexx/api/v1/serializers/world.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | 1
|
2021-04-23T11:49:50.000Z
|
2021-04-23T11:49:50.000Z
|
boundlexx/api/v1/serializers/world.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | 1
|
2021-04-17T18:17:12.000Z
|
2021-04-17T18:17:12.000Z
|
boundlexx/api/v1/serializers/world.py
|
AngellusMortis/boundlexx
|
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from boundlexx.api.common.serializers import (
BlockColorSerializer,
ItemColorSerializer,
PossibleWBCSerializer,
ResourcesSerializer,
SimpleWorldSerializer,
WorldBlockColorSerializer,
WorldColorSerializer,
WorldDistanceSerializer,
WorldPollLeaderboardSerializer,
WorldPollResourcesSerializer,
WorldPollSerializer,
WorldSerializer,
)
from boundlexx.api.v1.serializers.base import (
NestedHyperlinkedIdentityField,
RequestBasketsURL,
ShopStandsURL,
SimpleColorSerializer,
URLSimpleItemSerializer,
URLSimpleSkillSerializer,
URLSimpleWorldSerializer,
)
from boundlexx.boundless.models import (
ResourceCount,
World,
WorldBlockColor,
WorldDistance,
WorldPoll,
)
class URLWorldSerializer(WorldSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="world-detail",
lookup_field="id",
)
polls_url = NestedHyperlinkedIdentityField(
view_name="world-poll-list",
lookup_field=["id"],
lookup_url_kwarg=["world_id"],
)
block_colors_url = serializers.HyperlinkedIdentityField(
view_name="world-block-colors",
lookup_field="id",
)
distances_url = NestedHyperlinkedIdentityField(
view_name="world-distance-list",
lookup_field=["id"],
lookup_url_kwarg=["world_source__id"],
)
request_baskets_url = RequestBasketsURL()
shop_stands_url = ShopStandsURL()
assignment = URLSimpleWorldSerializer(allow_null=True)
protection_skill = URLSimpleSkillSerializer()
class Meta:
model = World
fields = [
"url",
"id",
"polls_url",
"block_colors_url",
"distances_url",
"request_baskets_url",
"next_request_basket_update",
"shop_stands_url",
"next_shop_stand_update",
"active",
"name",
"display_name",
"text_name",
"html_name",
"address",
"image_url",
"forum_url",
"assignment",
"region",
"tier",
"size",
"world_type",
"special_type",
"protection_points",
"protection_skill",
"time_offset",
"last_updated",
"is_sovereign",
"is_perm",
"is_exo",
"is_creative",
"is_locked",
"is_public",
"is_public_edit",
"is_public_claim",
"is_finalized",
"number_of_regions",
"start",
"end",
"atmosphere_color",
"water_color",
"surface_liquid",
"core_liquid",
"bows",
"atlas_image_url",
]
class URLWorldPollLeaderboardSerializer(WorldPollLeaderboardSerializer):
world_poll_url = NestedHyperlinkedIdentityField(
view_name="world-poll-detail",
lookup_field=["world.id", "id"],
lookup_url_kwarg=["world_id", "id"],
read_only=True,
)
class Meta:
model = WorldPoll
fields = ["world_poll_id", "world_poll_url", "leaderboard"]
class URLResourcesSerializer(ResourcesSerializer):
item = URLSimpleItemSerializer()
class Meta:
model = ResourceCount
fields = ["item", "is_embedded", "percentage", "count", "average_per_chunk"]
class URLWorldPollResourcesSerializer(WorldPollResourcesSerializer):
world_poll_url = NestedHyperlinkedIdentityField(
view_name="world-poll-detail",
lookup_field=["world.id", "id"],
lookup_url_kwarg=["world_id", "id"],
read_only=True,
)
resources = URLResourcesSerializer(many=True)
class Meta:
model = WorldPoll
fields = ["world_poll_id", "world_poll_url", "resources"]
class URLWorldBlockColorSerializer(WorldBlockColorSerializer):
item = URLSimpleItemSerializer()
color = SimpleColorSerializer()
is_perm = serializers.BooleanField()
is_sovereign_only = serializers.BooleanField()
is_exo_only = serializers.BooleanField()
days_since_exo = serializers.IntegerField(allow_null=True)
days_since_transform_exo = serializers.IntegerField(allow_null=True)
first_world = URLSimpleWorldSerializer(allow_null=True)
last_exo = URLSimpleWorldSerializer(allow_null=True)
transform_first_world = URLSimpleWorldSerializer(allow_null=True)
transform_last_exo = URLSimpleWorldSerializer(allow_null=True)
class Meta:
model = WorldBlockColor
fields = [
"item",
"color",
"active",
"is_default",
"is_perm",
"is_sovereign_only",
"is_exo_only",
"is_new",
"is_new_exo",
"is_new_transform",
"days_since_exo",
"days_since_transform_exo",
"first_world",
"last_exo",
"transform_first_world",
"transform_last_exo",
]
class URLWorldDistanceSerializer(WorldDistanceSerializer):
world_source = URLSimpleWorldSerializer()
world_dest = URLSimpleWorldSerializer()
class Meta:
model = WorldDistance
fields = [
"world_source",
"world_dest",
"distance",
"cost",
"min_portal_cost",
"min_portal_open_cost",
"min_conduits",
]
class URLBlockColorSerializer(BlockColorSerializer):
item = URLSimpleItemSerializer()
world = URLSimpleWorldSerializer()
first_world = URLSimpleWorldSerializer(allow_null=True)
last_exo = URLSimpleWorldSerializer(allow_null=True)
transform_first_world = URLSimpleWorldSerializer(allow_null=True)
transform_last_exo = URLSimpleWorldSerializer(allow_null=True)
class Meta:
model = WorldBlockColor
fields = [
"item",
"world",
"active",
"is_default",
"is_perm",
"is_sovereign_only",
"is_exo_only",
"is_new",
"is_new_exo",
"is_new_transform",
"days_since_exo",
"days_since_transform_exo",
"first_world",
"last_exo",
"transform_first_world",
"transform_last_exo",
]
class URLItemColorSerializer(ItemColorSerializer):
color = SimpleColorSerializer()
first_world = URLSimpleWorldSerializer(allow_null=True)
last_exo = URLSimpleWorldSerializer(allow_null=True)
transform_first_world = URLSimpleWorldSerializer(allow_null=True)
transform_last_exo = URLSimpleWorldSerializer(allow_null=True)
class Meta:
model = WorldBlockColor
fields = [
"color",
"active",
"is_default",
"is_perm",
"is_sovereign_only",
"is_exo_only",
"is_new",
"is_new_exo",
"is_new_transform",
"days_since_exo",
"days_since_transform_exo",
"first_world",
"last_exo",
"transform_first_world",
"transform_last_exo",
]
class PossibleColorSerializer(PossibleWBCSerializer):
color = SimpleColorSerializer()
class Meta:
model = WorldBlockColor
fields = [
"color",
]
class PossibleItemSerializer(serializers.ModelSerializer):
item = URLSimpleItemSerializer()
class Meta:
model = WorldBlockColor
fields = [
"item",
]
class URLWorldColorSerializer(WorldColorSerializer):
world = URLSimpleWorldSerializer()
first_world = URLSimpleWorldSerializer(allow_null=True)
last_exo = URLSimpleWorldSerializer(allow_null=True)
transform_first_world = URLSimpleWorldSerializer(allow_null=True)
transform_last_exo = URLSimpleWorldSerializer(allow_null=True)
class Meta:
model = WorldBlockColor
fields = [
"color",
"world",
"active",
"is_default",
"is_perm",
"is_sovereign_only",
"is_exo_only",
"is_new",
"is_new_exo",
"is_new_transform",
"days_since_exo",
"days_since_transform_exo",
"first_world",
"last_exo",
"transform_first_world",
"transform_last_exo",
]
class WorldBlockColorsViewSerializer(
serializers.Serializer
): # pylint: disable=abstract-method
world_url = serializers.HyperlinkedIdentityField(
view_name="world-detail",
lookup_field="id",
read_only=True,
)
block_colors = URLWorldBlockColorSerializer(many=True, read_only=True)
class URLWorldPollSerializer(WorldPollSerializer):
url = NestedHyperlinkedIdentityField(
view_name="world-poll-detail",
lookup_field=["world.id", "id"],
lookup_url_kwarg=["world_id", "id"],
read_only=True,
)
leaderboard_url = NestedHyperlinkedIdentityField(
view_name="world-poll-leaderboard",
lookup_field=["world.id", "id"],
lookup_url_kwarg=["world_id", "id"],
read_only=True,
)
resources_url = NestedHyperlinkedIdentityField(
view_name="world-poll-resources",
lookup_field=["world.id", "id"],
lookup_url_kwarg=["world_id", "id"],
read_only=True,
)
world = URLSimpleWorldSerializer()
class Meta:
model = WorldPoll
fields = [
"url",
"id",
"leaderboard_url",
"resources_url",
"time",
"world",
"player_count",
"beacon_count",
"plot_count",
"total_prestige",
]
class KindOfSimpleWorldSerializer(SimpleWorldSerializer):
url = serializers.HyperlinkedIdentityField(
view_name="world-detail",
lookup_field="id",
)
class Meta:
model = World
fields = [
"url",
"id",
"active",
"image_url",
"name",
"display_name",
"text_name",
"html_name",
"world_class",
"tier",
"size",
"world_type",
"region",
"address",
"special_type",
"last_updated",
"is_sovereign",
"is_perm",
"is_exo",
"is_creative",
"is_locked",
"is_public",
"is_public_edit",
"is_public_claim",
"atlas_image_url",
]
| 27.641026
| 84
| 0.589889
|
906fa70ef90f7290724b60ee60af3812b8db4440
| 982
|
py
|
Python
|
bindings/python/examples/ispace.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/examples/ispace.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
bindings/python/examples/ispace.py
|
stkaplan/legion
|
ad82a1c1f39ed20a16df29aa331428d42c0ecfb6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import pygion
from pygion import task, Ispace, RW
@task
def main():
d = Ispace(10)
t = 0
for x in d:
print(x)
t += int(x)
assert t == 45
d2 = Ispace([3, 3], [1, 1])
t2 = 0
for x in d2:
print(x)
t2 += x[0] * x[1]
assert t2 == 36
if __name__ == '__main__':
main()
| 23.95122
| 74
| 0.664969
|
6e8ec8675682c91053215bdfe303b385987104df
| 426
|
py
|
Python
|
LTC4099_constants.py
|
chisl/LTC4099-py2
|
b30b58c27eedc7a064059e0d7a9ed4466a13a0b6
|
[
"MIT"
] | null | null | null |
LTC4099_constants.py
|
chisl/LTC4099-py2
|
b30b58c27eedc7a064059e0d7a9ed4466a13a0b6
|
[
"MIT"
] | null | null | null |
LTC4099_constants.py
|
chisl/LTC4099-py2
|
b30b58c27eedc7a064059e0d7a9ed4466a13a0b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""LTC4099: I2C Controlled USB Power Manager/Charger with Overvoltage Protection"""
__author__ = "ChISL"
__copyright__ = "TBD"
__credits__ = ["Linear Technology"]
__license__ = "TBD"
__version__ = "0.1"
__maintainer__ = "https://chisl.io"
__email__ = "info@chisl.io"
__status__ = "Test"
class REG:
COMMAND_0 = 0
COMMAND_1 = 1
IRQ_MASK = 2
OUTPUT = 3
| 22.421053
| 83
| 0.661972
|
1b3ab150aaf325723b197d105dfc2d1ee634ff5e
| 264
|
py
|
Python
|
tests/artificial/transf_Logit/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Logit_Lag1Trend_12_12_100.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Logit/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Logit_Lag1Trend_12_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Logit/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Logit_Lag1Trend_12_12_100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Logit", sigma = 0.0, exog_count = 100, ar_order = 12);
| 37.714286
| 164
| 0.731061
|
a6d8ea49a252d1cfab2a78eadf25dbd70115aa1a
| 586
|
py
|
Python
|
wc.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 7
|
2022-01-15T02:53:53.000Z
|
2022-02-17T00:51:32.000Z
|
wc.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 8
|
2022-01-16T02:45:24.000Z
|
2022-03-21T02:08:27.000Z
|
wc.py
|
HansGR/WorldsCollide
|
af227be553e120ee004b130598360c61daf7df59
|
[
"MIT"
] | 5
|
2022-01-15T02:53:38.000Z
|
2022-01-19T17:42:10.000Z
|
def main():
import args
import log
from memory.memory import Memory
memory = Memory()
from data.data import Data
data = Data(memory.rom, args)
from event.events import Events
events = Events(memory.rom, args, data)
from menus.menus import Menus
menus = Menus(data.characters, data.dances)
from battle import Battle
battle = Battle()
from settings import Settings
settings = Settings()
from bug_fixes import BugFixes
bug_fixes = BugFixes()
data.write()
memory.write()
if __name__ == '__main__':
main()
| 18.903226
| 47
| 0.658703
|
a118e2c7c7d2367792ee43adc2e63b575a16051b
| 7,463
|
py
|
Python
|
purity_fb/purity_fb_1dot6/__init__.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/__init__.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot6/__init__.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.6 Python SDK
Pure Storage FlashBlade REST 1.X Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.admin import Admin
from .models.admin_api_token import AdminApiToken
from .models.admin_cache import AdminCache
from .models.admin_cache_response import AdminCacheResponse
from .models.admin_response import AdminResponse
from .models.alert import Alert
from .models.alert_response import AlertResponse
from .models.alert_watcher import AlertWatcher
from .models.alert_watcher_response import AlertWatcherResponse
from .models.alert_watcher_test import AlertWatcherTest
from .models.alert_watcher_test_response import AlertWatcherTestResponse
from .models.array_http_performance import ArrayHttpPerformance
from .models.array_http_performance_response import ArrayHttpPerformanceResponse
from .models.array_performance import ArrayPerformance
from .models.array_performance_response import ArrayPerformanceResponse
from .models.array_response import ArrayResponse
from .models.array_s3_performance import ArrayS3Performance
from .models.array_s3_performance_response import ArrayS3PerformanceResponse
from .models.array_space import ArraySpace
from .models.array_space_response import ArraySpaceResponse
from .models.blade import Blade
from .models.blade_response import BladeResponse
from .models.bucket import Bucket
from .models.bucket_patch import BucketPatch
from .models.bucket_post import BucketPost
from .models.bucket_response import BucketResponse
from .models._built_in import BuiltIn
from .models.certificate import Certificate
from .models.certificate_response import CertificateResponse
from .models.client_performance import ClientPerformance
from .models.client_performance_response import ClientPerformanceResponse
from .models.directory_service import DirectoryService
from .models.directory_service_response import DirectoryServiceResponse
from .models.directory_service_role import DirectoryServiceRole
from .models.directory_service_roles_response import DirectoryServiceRolesResponse
from .models.directoryservice_smb import DirectoryserviceSmb
from .models.dns import Dns
from .models.dns_response import DnsResponse
from .models.error_response import ErrorResponse
from .models.file_system import FileSystem
from .models.file_system_performance import FileSystemPerformance
from .models.file_system_performance_response import FileSystemPerformanceResponse
from .models.file_system_response import FileSystemResponse
from .models.file_system_snapshot import FileSystemSnapshot
from .models.file_system_snapshot_response import FileSystemSnapshotResponse
from .models._fixed_reference import FixedReference
from .models.hardware import Hardware
from .models.hardware_connector import HardwareConnector
from .models.hardware_connector_response import HardwareConnectorResponse
from .models.hardware_response import HardwareResponse
from .models.link_aggregation_group import LinkAggregationGroup
from .models.link_aggregation_group_response import LinkAggregationGroupResponse
from .models.linkaggregationgroup import Linkaggregationgroup
from .models.login_response import LoginResponse
from .models.network_interface import NetworkInterface
from .models.network_interface_response import NetworkInterfaceResponse
from .models.nfs_rule import NfsRule
from .models.object_response import ObjectResponse
from .models.object_store_access_key import ObjectStoreAccessKey
from .models.object_store_access_key_response import ObjectStoreAccessKeyResponse
from .models.object_store_account import ObjectStoreAccount
from .models.object_store_account_response import ObjectStoreAccountResponse
from .models.object_store_user import ObjectStoreUser
from .models.object_store_user_response import ObjectStoreUserResponse
from .models.objectstoreaccesskey import Objectstoreaccesskey
from .models.pagination_info import PaginationInfo
from .models.policy import Policy
from .models.policy_objects import PolicyObjects
from .models.policy_objects_response import PolicyObjectsResponse
from .models.policy_patch import PolicyPatch
from .models._policy_reference import PolicyReference
from .models.policy_response import PolicyResponse
from .models.protocol_rule import ProtocolRule
from .models.pure_array import PureArray
from .models.pure_error import PureError
from .models.pure_object import PureObject
from .models.quotas_group import QuotasGroup
from .models.quotas_group_response import QuotasGroupResponse
from .models.quotas_user import QuotasUser
from .models.quotas_user_response import QuotasUserResponse
from .models.quotasgroup_group import QuotasgroupGroup
from .models.quotasuser_user import QuotasuserUser
from .models.reference import Reference
from .models._resource import Resource
from .models._resource_rule import ResourceRule
from .models._resource_type import ResourceType
from .models.smb_rule import SmbRule
from .models.smtp import Smtp
from .models.smtp_response import SmtpResponse
from .models.snapshot_suffix import SnapshotSuffix
from .models.space import Space
from .models.subnet import Subnet
from .models.subnet_response import SubnetResponse
from .models.support import Support
from .models.support_remote_assist_paths import SupportRemoteAssistPaths
from .models.support_response import SupportResponse
from .models.test_result import TestResult
from .models.test_result_response import TestResultResponse
from .models.version_response import VersionResponse
# import apis into sdk package
from .apis.admins_api import AdminsApi
from .apis.admins_cache_api import AdminsCacheApi
from .apis.alert_watchers_api import AlertWatchersApi
from .apis.alerts_api import AlertsApi
from .apis.arrays_api import ArraysApi
from .apis.authentication_api import AuthenticationApi
from .apis.blade_api import BladeApi
from .apis.buckets_api import BucketsApi
from .apis.certificates_api import CertificatesApi
from .apis.directory_services_api import DirectoryServicesApi
from .apis.dns_api import DnsApi
from .apis.file_system_snapshots_api import FileSystemSnapshotsApi
from .apis.file_systems_api import FileSystemsApi
from .apis.hardware_api import HardwareApi
from .apis.hardware_connectors_api import HardwareConnectorsApi
from .apis.link_aggregation_groups_api import LinkAggregationGroupsApi
from .apis.logs_api import LogsApi
from .apis.network_interfaces_api import NetworkInterfacesApi
from .apis.object_store_access_keys_api import ObjectStoreAccessKeysApi
from .apis.object_store_accounts_api import ObjectStoreAccountsApi
from .apis.object_store_users_api import ObjectStoreUsersApi
from .apis.policies_api import PoliciesApi
from .apis.quotas_groups_api import QuotasGroupsApi
from .apis.quotas_users_api import QuotasUsersApi
from .apis.smtp_api import SmtpApi
from .apis.subnets_api import SubnetsApi
from .apis.support_api import SupportApi
from .apis.usage_groups_api import UsageGroupsApi
from .apis.usage_users_api import UsageUsersApi
from .apis.version_api import VersionApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
| 48.148387
| 204
| 0.874715
|
ddb31abd0d9e9f8e730b621fc194ec33325db38d
| 676
|
py
|
Python
|
Container/LineList.py
|
ervin-meng/pyspider
|
cb98eed7f61a9ebde39a42d469e7fd93b06e15aa
|
[
"MIT"
] | null | null | null |
Container/LineList.py
|
ervin-meng/pyspider
|
cb98eed7f61a9ebde39a42d469e7fd93b06e15aa
|
[
"MIT"
] | null | null | null |
Container/LineList.py
|
ervin-meng/pyspider
|
cb98eed7f61a9ebde39a42d469e7fd93b06e15aa
|
[
"MIT"
] | null | null | null |
# -*- coding=UTF-8 -*-
import redis
class LineList:
    _type = 'stack'  # or 'queue' for FIFO behaviour
_name = ''
_media = ''
def __init__(self,name='',media='redis'):
self._name = name
if media=='redis':
self._media = redis.Redis(host='127.0.0.1',port=6379,decode_responses=True)
def add(self,data):
return self._media.rpush(self._name,data)
def get(self):
if(self._type=='queue'):
return self._media.lpop(self._name)
else:
return self._media.rpop(self._name)
def llen(self):
return self._media.llen(self._name)
def clean(self):
return self._media.delete(self._name)
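# --- Hedged usage sketch (not part of the original module) -------------------
# LineList stores items in a Redis list: add() always rpush-es, while get()
# pops from the right ('stack', LIFO) or from the left ('queue', FIFO).
# Requires a Redis server on 127.0.0.1:6379; the key name is arbitrary.
if __name__ == '__main__':
    tasks = LineList(name='demo:tasks')
    tasks.add('first')
    tasks.add('second')
    print(tasks.get())   # 'second' with the default LIFO behaviour
    print(tasks.llen())  # 1
    tasks.clean()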
| 24.142857
| 87
| 0.578402
|
36a3f80c653ff0f67ba5a6ae16e450915d71366b
| 5,699
|
py
|
Python
|
Rain_fall_in_Australia.py
|
HeyKashit/Rain-fall-predication-in-Australia
|
e56d2f652352409d62c76652011d9afa2d9ec4f8
|
[
"MIT"
] | 5
|
2021-08-16T17:17:02.000Z
|
2021-09-26T10:45:20.000Z
|
Rain_fall_in_Australia.py
|
HeyKashit/Rain-fall-predication-in-Australia
|
e56d2f652352409d62c76652011d9afa2d9ec4f8
|
[
"MIT"
] | null | null | null |
Rain_fall_in_Australia.py
|
HeyKashit/Rain-fall-predication-in-Australia
|
e56d2f652352409d62c76652011d9afa2d9ec4f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # This is rainfall prediction in Australia and EDA.
# ### Importing Modules
# In[1]:
# importing modules
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import warnings
warnings.filterwarnings(action="ignore")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix,classification_report,roc_curve, roc_auc_score
# ### Importing Dataset
# In[3]:
df = pd.read_csv("weatherAUS.csv",nrows=10000)
df.head().T
# In[4]:
# Info of data
df.info()
# In[5]:
# shape of data:
print(f'Number of rows: {df.shape[0]} and number of columns: {df.shape[1]}')
# In[6]:
# Checking for null values
df.isna().sum()
# In[7]:
# statistical info of dataset
df.describe().T
# In[8]:
df.describe()
# In[9]:
# Identifying Continuous and Categorical Columns
category=[]
contin = []
for i in df.columns:
if df[i].dtype =="object":
category.append(i)
else:
contin.append(i)
print("Categorical:",category)
print("Continuous:", contin)
# In[10]:
df.head()
# **Encoding RainToday and RainTomorrow Columns** by mapping Yes/No to 1/0
# In[11]:
df['RainTomorrow'] = df['RainTomorrow'].map({'Yes': 1, 'No': 0})
df['RainToday'] = df['RainToday'].map({'Yes': 1, 'No': 0})
# In[12]:
df["RainToday"].unique()
# In[13]:
df["RainTomorrow"].unique()
# In[14]:
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# In[15]:
df[["RainToday","RainTomorrow"]]
# Percentage of **Null values in dataset**
# In[16]:
(df.isnull().sum()/len(df))*100
# In[17]:
(df.isnull().sum()/len(df))
# In[18]:
df.head().T
# In[19]:
df.columns
# ### Handling Null values
# In[20]:
# filling the missing values for continuous variables with mean
df["MinTemp"]= df["MinTemp"].fillna(df["MinTemp"].mean())
df["MaxTemp"]= df["MaxTemp"].fillna(df["MaxTemp"].mean())
df["Evaporation"]= df["Evaporation"].fillna(df["Evaporation"].mean())
df["Sunshine"]= df["Sunshine"].fillna(df["Sunshine"].mean())
df["WindGustSpeed"]= df["WindGustSpeed"].fillna(df["WindGustSpeed"].mean())
df["Rainfall"]= df["Rainfall"].fillna(df["Rainfall"].mean())
df["WindSpeed9am"]= df["WindSpeed9am"].fillna(df["WindSpeed9am"].mean())
df["WindSpeed3pm"]= df["WindSpeed3pm"].fillna(df["WindSpeed3pm"].mean())
df["Humidity9am"]= df["Humidity9am"].fillna(df["Humidity9am"].mean())
df["Humidity3pm"]= df["Humidity3pm"].fillna(df["Humidity3pm"].mean())
df["Pressure9am"]= df["Pressure9am"].fillna(df["Pressure9am"].mean())
df["Pressure3pm"]= df["Pressure3pm"].fillna(df["Pressure3pm"].mean())
df["Cloud9am"]= df["Cloud9am"].fillna(df["Cloud9am"].mean())
df["Cloud3pm"]= df["Cloud3pm"].fillna(df["Cloud3pm"].mean())
df["Temp9am"]= df["Temp9am"].fillna(df["Temp9am"].mean())
df["Temp3pm"]= df["Temp3pm"].fillna(df["Temp3pm"].mean())
# In[21]:
# Filling the missing values for categorical variables with mode
df['RainToday']=df['RainToday'].fillna(df['RainToday'].mode()[0])
df['RainTomorrow']=df['RainTomorrow'].fillna(df['RainTomorrow'].mode()[0])
df['WindDir9am'] = df['WindDir9am'].fillna(df['WindDir9am'].mode()[0])
df['WindGustDir'] = df['WindGustDir'].fillna(df['WindGustDir'].mode()[0])
df['WindDir3pm'] = df['WindDir3pm'].fillna(df['WindDir3pm'].mode()[0])
# In[22]:
df.head()
# In[23]:
# again checking for null values
(df.isnull().sum()/len(df))*100
# ### **Countplot** for RainToday and RainTomorrow:
# In[24]:
fig, ax =plt.subplots(1,2)
plt.figure(figsize=(8,5))
sns.countplot(df["RainToday"],ax=ax[0])
sns.countplot(df["RainTomorrow"],ax = ax[1])
# ### Heatmap showing **Correlation** among attributes of data
# In[25]:
#heatmap
plt.figure(figsize=(18,12))
sns.heatmap(df.corr(), annot=True)
plt.xticks(rotation=90)
plt.show()
# **Inferences from Heatmap**:
# * MinTemp and Temp9am highly correlated.
# * MinTemp and Temp3pm highly correlated.
# * MaxTemp and Temp9am highly correlated.
# * MaxTemp and Temp3pm highly correlated.
# * Temp3pm and Temp9am highly correlated.
# * Humidity9am and Humidity3pm highly correlated.
# In[26]:
#encoding remaining columns
df["Location"] = le.fit_transform(df["Location"])
df["WindDir9am"]= le.fit_transform(df["WindDir9am"])
df["WindDir3pm"]= le.fit_transform(df["WindDir3pm"])
df["WindGustDir"] = le.fit_transform(df["WindGustDir"])
# In[27]:
df.head()
# In[28]:
# Dropping highly correlated columns
df=df.drop(['Temp3pm','Temp9am','Humidity9am',"Date"],axis=1)
df.columns
# In[29]:
df.head()
# In[30]:
x=df.drop(['RainTomorrow','Location','WindGustDir','WindGustSpeed','WindDir3pm','WindDir9am','WindSpeed3pm','Pressure3pm','Cloud3pm','Evaporation','RainToday','Pressure9am','WindSpeed9am'],axis=1)
y=df['RainTomorrow']
x.columns
# ### Splitting data into Training and Testing Set
# In[31]:
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# ### RandomforestClassifier
# In[32]:
model=RandomForestClassifier()
model.fit(X_train,y_train)
# In[33]:
# accuracy of RandomForest Model
y_predxgb = model.predict(X_test)
report = classification_report(y_test, y_predxgb)
print(report)
print("Accuracy of the RandomForest Model is:",accuracy_score(y_test,y_predxgb)*100,"%")
cm = confusion_matrix(y_test, y_predxgb)
sns.heatmap(cm, annot=True,cmap="YlGnBu")
plt.title("Confusion Matrix for RandomForest Model")
plt.show()
# In[34]:
import pickle
pickle_out = open("Finalmodel.pkl","wb")
pickle.dump(model, pickle_out)
pickle_out.close()
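# Illustrative check (not part of the original notebook): reloading the pickled
# model reproduces the RandomForest predictions on the held-out features.
pickle_in = open("Finalmodel.pkl", "rb")
loaded_model = pickle.load(pickle_in)
pickle_in.close()
assert (loaded_model.predict(X_test) == y_predxgb).all()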
# In[35]:
df
# In[ ]:
# In[ ]:
| 17.535385
| 196
| 0.684331
|
6b7aee7b98cdf372573e4b7565f5301fc345a582
| 397
|
py
|
Python
|
backend/test_32387/wsgi.py
|
crowdbotics-apps/test-32387
|
a415af4c96bb26031486d8f66f5e6df4a398d7dd
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/test_32387/wsgi.py
|
crowdbotics-apps/test-32387
|
a415af4c96bb26031486d8f66f5e6df4a398d7dd
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/test_32387/wsgi.py
|
crowdbotics-apps/test-32387
|
a415af4c96bb26031486d8f66f5e6df4a398d7dd
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for test_32387 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_32387.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
6365dcfd20853dd8205b7b4447f4d7388c73f03f
| 7,437
|
py
|
Python
|
python/led.py
|
wserr/audio-reactive-led-strip
|
0da690891ab028c681ca251d2d0195c41108f79f
|
[
"MIT"
] | null | null | null |
python/led.py
|
wserr/audio-reactive-led-strip
|
0da690891ab028c681ca251d2d0195c41108f79f
|
[
"MIT"
] | null | null | null |
python/led.py
|
wserr/audio-reactive-led-strip
|
0da690891ab028c681ca251d2d0195c41108f79f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
import platform
import numpy as np
import config
# ESP8266 uses WiFi communication
if config.DEVICE == 'esp8266':
import socket
_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Raspberry Pi controls the LED strip directly
elif config.DEVICE == 'pi':
import neopixel
strip = neopixel.Adafruit_NeoPixel(config.N_PIXELS, config.LED_PIN,
config.LED_FREQ_HZ, config.LED_DMA,
config.LED_INVERT, config.BRIGHTNESS)
strip.begin()
elif config.DEVICE == 'blinkstick':
from blinkstick import blinkstick
import signal
import sys
    # Turns all LEDs off when invoked.
def signal_handler(signal, frame):
all_off = [0]*(config.N_PIXELS*3)
stick.set_led_data(0, all_off)
sys.exit(0)
stick = blinkstick.find_first()
# Create a listener that turns the leds off when the program terminates
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
elif config.DEVICE == 'max7219':
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
serial = spi(port=0, device=0,gpio=noop())
device = max7219(serial, cascaded=1,block_orientation=-90)
_gamma = np.load(config.GAMMA_TABLE_PATH)
"""Gamma lookup table used for nonlinear brightness correction"""
_prev_pixels = np.tile(253, (3, config.N_PIXELS))
"""Pixel values that were most recently displayed on the LED strip"""
pixels = np.tile(1, (3, config.N_PIXELS))
"""Pixel values for the LED strip"""
_is_python_2 = int(platform.python_version_tuple()[0]) == 2
def _update_esp8266():
"""Sends UDP packets to ESP8266 to update LED strip values
The ESP8266 will receive and decode the packets to determine what values
to display on the LED strip. The communication protocol supports LED strips
with a maximum of 256 LEDs.
The packet encoding scheme is:
|i|r|g|b|
where
i (0 to 255): Index of LED to change (zero-based)
r (0 to 255): Red value of LED
g (0 to 255): Green value of LED
b (0 to 255): Blue value of LED
"""
global pixels, _prev_pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
    # Optionally apply gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
MAX_PIXELS_PER_PACKET = 126
# Pixel indices
idx = range(pixels.shape[1])
idx = [i for i in idx if not np.array_equal(p[:, i], _prev_pixels[:, i])]
n_packets = len(idx) // MAX_PIXELS_PER_PACKET + 1
idx = np.array_split(idx, n_packets)
for packet_indices in idx:
m = '' if _is_python_2 else []
for i in packet_indices:
if _is_python_2:
m += chr(i) + chr(p[0][i]) + chr(p[1][i]) + chr(p[2][i])
else:
m.append(i) # Index of pixel to change
m.append(p[0][i]) # Pixel red value
m.append(p[1][i]) # Pixel green value
m.append(p[2][i]) # Pixel blue value
m = m if _is_python_2 else bytes(m)
_sock.sendto(m, (config.UDP_IP, config.UDP_PORT))
_prev_pixels = np.copy(p)
def _update_pi():
"""Writes new LED values to the Raspberry Pi's LED strip
Raspberry Pi uses the rpi_ws281x to control the LED strip directly.
This function updates the LED strip with new values.
"""
global pixels, _prev_pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
# Optional gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
# Encode 24-bit LED values in 32 bit integers
r = np.left_shift(p[0][:].astype(int), 8)
g = np.left_shift(p[1][:].astype(int), 16)
b = p[2][:].astype(int)
rgb = np.bitwise_or(np.bitwise_or(r, g), b)
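    # Packing used above: green occupies bits 16-23, red bits 8-15 and blue
    # bits 0-7, so a pure-red pixel (255, 0, 0) becomes 255 << 8 == 0x0000FF00.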
# Update the pixels
for i in range(config.N_PIXELS):
# Ignore pixels if they haven't changed (saves bandwidth)
if np.array_equal(p[:, i], _prev_pixels[:, i]):
continue
strip._led_data[i] = rgb[i]
_prev_pixels = np.copy(p)
strip.show()
def RoundToOne(n):
if n > 120:
return 1
else:
return 0
def _update_max7219():
"""Updates the max 7219 chip
"""
global pixels, _prev_pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
# Optional gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
x = 0
y = 0
for i in range(config.N_PIXELS):
with canvas(device) as draw:
if p[1][i] > 64: draw.point((x,y),fill="white")
x=x+1
if x == 8:
x=0
y=y+1
def _update_test():
"""Updates the max 7219 chip
"""
global pixels, _prev_pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
# Optional gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
# Encode 24-bit LED values in 32 bit integers
r = np.left_shift(p[0][:].astype(int), 8)
g = np.left_shift(p[1][:].astype(int), 16)
b = p[2][:].astype(int)
rgb = np.bitwise_or(np.bitwise_or(r, g), b)
x = 0
y = 0
for i in range(config.N_PIXELS):
if p[1][i] > 64:
print(1,end='')
else:
print(0,end='')
x=x+1
if x == 8:
x=0
y=y+1
print(" ")
def _update_blinkstick():
"""Writes new LED values to the Blinkstick.
This function updates the LED strip with new values.
"""
global pixels
# Truncate values and cast to integer
pixels = np.clip(pixels, 0, 255).astype(int)
# Optional gamma correction
p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
# Read the rgb values
r = p[0][:].astype(int)
g = p[1][:].astype(int)
b = p[2][:].astype(int)
#create array in which we will store the led states
newstrip = [None]*(config.N_PIXELS*3)
for i in range(config.N_PIXELS):
# blinkstick uses GRB format
newstrip[i*3] = g[i]
newstrip[i*3+1] = r[i]
newstrip[i*3+2] = b[i]
#send the data to the blinkstick
stick.set_led_data(0, newstrip)
def update():
"""Updates the LED strip values"""
if config.DEVICE == 'esp8266':
_update_esp8266()
elif config.DEVICE == 'pi':
_update_pi()
elif config.DEVICE == 'blinkstick':
_update_blinkstick()
elif config.DEVICE == 'max7219':
_update_max7219()
elif config.DEVICE == 'test':
_update_test()
else:
raise ValueError('Invalid device selected')
# Execute this file to run a LED strand test
# If everything is working, you should see a red, green, and blue pixel scroll
# across the LED strip continuously
if __name__ == '__main__':
import time
# Turn all pixels off
pixels *= 0
pixels[0, 0] = 255 # Set 1st pixel red
pixels[1, 1] = 255 # Set 2nd pixel green
pixels[2, 2] = 255 # Set 3rd pixel blue
print('Starting LED strand test')
while True:
pixels = np.roll(pixels, 1, axis=1)
update()
time.sleep(.1)
| 32.194805
| 79
| 0.627269
|
816a5b78b8ee254579b6831402b421fd89de43d4
| 1,617
|
py
|
Python
|
constants.py
|
deadrobots/Create-17
|
c90c985c75f5994e17b6b9e5f1754e582b2d1310
|
[
"MIT"
] | null | null | null |
constants.py
|
deadrobots/Create-17
|
c90c985c75f5994e17b6b9e5f1754e582b2d1310
|
[
"MIT"
] | null | null | null |
constants.py
|
deadrobots/Create-17
|
c90c985c75f5994e17b6b9e5f1754e582b2d1310
|
[
"MIT"
] | null | null | null |
from wallaby import digital
# Misc
ALLOW_BUTTON_WAIT = False
START_TIME = 0
CLONE_SWITCH = 9
IS_CLONE = digital(CLONE_SWITCH)
IS_PRIME = not IS_CLONE
STARTLIGHT = 2
FRONT_BUMPED = 0
# Tophats
LEFT_TOPHAT = 0
RIGHT_TOPHAT = 5
THRESHOLD = 1500
# Servos
SERVO_CLAW = 3
SERVO_ARM = 1
SERVO_HAY_SPIN = 2
SERVO_HAY_ARM = 0
#2 1569
#3 170
#1 Same for now
# Motors
Y_ARM = 0
HAY_MOTOR = 1
# Servo Values
if IS_PRIME:
ARM_OFFSET = -200
CLAW_OFFSET = 0
HAY_ARM_OFFSET = 0
HAY_SPIN_OFFSET = 0
else:
ARM_OFFSET = -330
CLAW_OFFSET = 0
HAY_ARM_OFFSET = 0
HAY_SPIN_OFFSET = 80
ARM_BACK = 2000#2040
ARM_UP = 1647 + ARM_OFFSET
ARM_DOWN = 475 + ARM_OFFSET
ARM_DROP = 600 + ARM_OFFSET
# if IS_CLONE:
# ARM_BACK = 1600
CLAW_OPEN = 0 + CLAW_OFFSET
CLAW_CLOSE = 2047 + CLAW_OFFSET
HAY_ARM_FLAT = 1776 + HAY_ARM_OFFSET
HAY_ARM_UP = 500 + HAY_ARM_OFFSET
HAY_ARM_GATHER = 1900 + HAY_ARM_OFFSET
HAY_ARM_STORE = 0 + HAY_ARM_OFFSET
HAY_ARM_BARN = 1200 + HAY_ARM_OFFSET
HAY_ARM_START = 170 + HAY_ARM_OFFSET
HAY_ARM_START_BOX = 620 + HAY_ARM_OFFSET
HAY_ARM_PICK_UP = 1900 + HAY_ARM_OFFSET
HAY_ARM_PICK_DRIVE = 1820 + HAY_ARM_OFFSET
# if IS_CLONE:
# HAY_ARM_UP = 1270
HAY_SPIN_DRIVE = 1000 + HAY_SPIN_OFFSET
HAY_SPIN_DELIVER = 2040 + HAY_SPIN_OFFSET
HAY_SPIN_BARN = 0
# HAY_SPIN_BARN_CLONE = 250 + HAY_SPIN_OFFSET
HAY_SPIN_START = 1570 + HAY_SPIN_OFFSET
HAY_SPIN_PICK_UP = 800 + HAY_SPIN_OFFSET
# if IS_CLONE:
# HAY_SPIN_START = HAY_SPIN_DRIVE
# Drive Info
TURN_TIME = 0 # -20 # 0, 15, 40
if IS_CLONE:
TURN_TIME = 40
seeding = True
LOGFILE = "" # Leave empty
ROBOT_NAME = "Create-17"
| 19.25
| 45
| 0.737168
|
aae54f0ed01ffd698276125639b4ed160db2db92
| 11,521
|
py
|
Python
|
datalad/distribution/create_sibling_github.py
|
psychoinformatics-de/datalad
|
7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c
|
[
"MIT"
] | null | null | null |
datalad/distribution/create_sibling_github.py
|
psychoinformatics-de/datalad
|
7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c
|
[
"MIT"
] | null | null | null |
datalad/distribution/create_sibling_github.py
|
psychoinformatics-de/datalad
|
7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""High-level interface for creating a publication target on GitHub
"""
__docformat__ = 'restructuredtext'
import logging
import re
import warnings
from datalad.interface.base import (
build_doc,
Interface,
)
from datalad.interface.common_opts import (
recursion_flag,
recursion_limit,
publish_depends,
)
from datalad.interface.results import get_status_dict
from datalad.interface.utils import eval_results
from datalad.support.param import Parameter
from datalad.support.constraints import (
EnsureChoice,
EnsureNone,
EnsureStr,
)
from datalad.distribution.dataset import (
datasetmethod,
EnsureDataset,
require_dataset,
)
from datalad.distribution.siblings import Siblings
lgr = logging.getLogger('datalad.distribution.create_sibling_github')
def normalize_reponame(path):
"""Turn name (e.g. path) into a Github compliant repository name
"""
return re.sub(r'\s+', '_', re.sub(r'[/\\]+', '-', path))
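# For illustration (hypothetical input): normalize_reponame('my data/sub dir')
# first maps path separators to '-' and then whitespace to '_',
# yielding 'my_data-sub_dir'.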
@build_doc
class CreateSiblingGithub(Interface):
"""Create dataset sibling on GitHub.
An existing GitHub project, or a project created via the GitHub website can
be configured as a sibling with the :command:`siblings` command.
Alternatively, this command can create a repository under a user's GitHub
account, or any organization a user is a member of (given appropriate
permissions). This is particularly helpful for recursive sibling creation
for subdatasets. In such a case, a dataset hierarchy is represented as a
flat list of GitHub repositories.
GitHub cannot host dataset content (but LFS special remote could be used,
http://handbook.datalad.org/r.html?LFS). However, in combination with
other data sources (and siblings), publishing a dataset to GitHub can
facilitate distribution and exchange, while still allowing any dataset
consumer to obtain actual data content from alternative sources.
For GitHub authentication a personal access token is needed.
Such a token can be generated by visiting https://github.com/settings/tokens
or navigating via GitHub Web UI through:
Settings -> Developer settings -> Personal access tokens.
We will first consult Git configuration *hub.oauthtoken* for tokens possibly
available there, and then from the system credential store.
If you provide [PY: `github_login` PY][CMD: --github-login NAME CMD],
we will consider only tokens associated with that GitHub login from
*hub.oauthtoken*, and store/check the token in credential store as associated
with that specific login name.
"""
_params_ = dict(
dataset=Parameter(
args=("--dataset", "-d",),
doc="""specify the dataset to create the publication target for. If
no dataset is given, an attempt is made to identify the dataset
based on the current working directory""",
constraints=EnsureDataset() | EnsureNone()),
reponame=Parameter(
args=('reponame',),
metavar='REPONAME',
doc="""GitHub repository name. When operating recursively,
a suffix will be appended to this name for each subdataset""",
constraints=EnsureStr()),
recursive=recursion_flag,
recursion_limit=recursion_limit,
name=Parameter(
args=('-s', '--name',),
metavar='NAME',
doc="""name to represent the GitHub repository in the local
dataset installation""",
constraints=EnsureStr()),
existing=Parameter(
args=("--existing",),
constraints=EnsureChoice('skip', 'error', 'reconfigure', 'replace'),
metavar='MODE',
doc="""desired behavior when already existing or configured
siblings are discovered. In this case, a dataset can be skipped
('skip'), the sibling configuration be updated ('reconfigure'),
or process interrupts with error ('error'). DANGER ZONE: If 'replace'
is used, an existing github repository will be irreversibly removed,
re-initialized, and the sibling (re-)configured (thus implies 'reconfigure').
`replace` could lead to data loss, so use with care. To minimize
possibility of data loss, in interactive mode DataLad will ask for
confirmation, but it would raise an exception in non-interactive mode.
""",),
github_login=Parameter(
args=('--github-login',),
constraints=EnsureStr() | EnsureNone(),
metavar='NAME',
doc="""GitHub user name or access token"""),
github_organization=Parameter(
args=('--github-organization',),
constraints=EnsureStr() | EnsureNone(),
metavar='NAME',
doc="""If provided, the repository will be created under this
GitHub organization. The respective GitHub user needs appropriate
permissions."""),
access_protocol=Parameter(
args=("--access-protocol",),
constraints=EnsureChoice('https', 'ssh'),
doc="""Which access protocol/URL to configure for the sibling"""),
publish_depends=publish_depends,
private=Parameter(
args=("--private",),
action="store_true",
default=False,
doc="""If this flag is set, the repository created on github
will be marked as private and only visible to those granted
access or by membership of a team/organization/etc.
"""),
dry_run=Parameter(
args=("--dry-run",),
action="store_true",
doc="""If this flag is set, no repositories will be created.
Instead tests for name collisions with existing projects will be
performed, and would-be repository names are reported for all
relevant datasets"""),
dryrun=Parameter(
args=("--dryrun",),
action="store_true",
doc="""Deprecated. Use the renamed
[CMD: --dry-run CMD][PY: `dry_run` PY] parameter"""),
)
@staticmethod
@datasetmethod(name='create_sibling_github')
@eval_results
def __call__(
reponame,
dataset=None,
recursive=False,
recursion_limit=None,
name='github',
existing='error',
github_login=None,
github_organization=None,
access_protocol='https',
publish_depends=None,
private=False,
dryrun=False,
dry_run=False):
if dryrun and not dry_run:
# the old one is used, and not in agreement with the new one
warnings.warn(
"datalad-create-sibling-github's `dryrun` option is "
"deprecated and will be removed in a future release, "
"use the renamed `dry_run/--dry-run` option instead.",
DeprecationWarning)
dry_run = dryrun
# this is an absolute leaf package, import locally to avoid
# unnecessary dependencies
from datalad.support.github_ import _make_github_repos_
if reponame != normalize_reponame(reponame):
raise ValueError('Invalid name for a GitHub project: {}'.format(
reponame))
# what to operate on
ds = require_dataset(
dataset, check_installed=True, purpose='create GitHub sibling(s)')
res_kwargs = dict(
action='create_sibling_github [dry-run]' if dry_run else
'create_sibling_github',
logger=lgr,
refds=ds.path,
)
# gather datasets and essential info
# dataset instance and mountpoint relative to the top
toprocess = [ds]
if recursive:
for sub in ds.subdatasets(
fulfilled=None, # we want to report on missing dataset in here
recursive=recursive,
recursion_limit=recursion_limit,
result_xfm='datasets'):
if not sub.is_installed():
lgr.info('Ignoring unavailable subdataset %s', sub)
continue
toprocess.append(sub)
# check for existing remote configuration
filtered = []
for d in toprocess:
if name in d.repo.get_remotes():
yield get_status_dict(
ds=d,
status='error' if existing == 'error' else 'notneeded',
message=('already has a configured sibling "%s"', name),
**res_kwargs)
continue
gh_reponame = reponame if d == ds else \
'{}-{}'.format(
reponame,
normalize_reponame(str(d.pathobj.relative_to(ds.pathobj))))
filtered.append((d, gh_reponame))
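            # e.g. (illustration only): reponame 'myds' with a subdataset at
            # 'code/analysis' yields the GitHub repo name 'myds-code-analysis'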
if not filtered:
# all skipped
return
# actually make it happen on GitHub
for res in _make_github_repos_(
github_login, github_organization, filtered,
existing, access_protocol, private, dry_run):
# blend reported results with standard properties
res = dict(
res,
**res_kwargs)
if 'message' not in res:
res['message'] = ("Dataset sibling '%s', project at %s", name, res['url'])
# report to caller
yield get_status_dict(**res)
if res['status'] not in ('ok', 'notneeded'):
# something went wrong, do not proceed
continue
# lastly configure the local datasets
if not dry_run:
extra_remote_vars = {
# first make sure that annex doesn't touch this one
# but respect any existing config
'annex-ignore': 'true',
# first push should separately push active branch first
# to overcome github issue of choosing "default" branch
# alphabetically if its name does not match the default
# branch for the user (or organization) which now defaults
# to "main"
'datalad-push-default-first': 'true'
}
for var_name, var_value in extra_remote_vars.items():
var = 'remote.{}.{}'.format(name, var_name)
if var not in d.config:
d.config.add(var, var_value, where='local')
yield from Siblings()(
'configure',
dataset=d,
name=name,
url=res['url'],
recursive=False,
# TODO fetch=True, maybe only if one existed already
publish_depends=publish_depends,
result_renderer='disabled')
# TODO let submodule URLs point to GitHub (optional)
| 41.442446
| 90
| 0.588143
|
4be32c9cff0e5dbf1af563bfa8c53555adceb91c
| 2,185
|
py
|
Python
|
core/error.py
|
ykomashiro/gd-backup
|
02d2513345a666aec131e94411b5f4c177fbe6ec
|
[
"MIT"
] | 1
|
2020-12-11T04:13:20.000Z
|
2020-12-11T04:13:20.000Z
|
core/error.py
|
ykomashiro/gd-backup
|
02d2513345a666aec131e94411b5f4c177fbe6ec
|
[
"MIT"
] | null | null | null |
core/error.py
|
ykomashiro/gd-backup
|
02d2513345a666aec131e94411b5f4c177fbe6ec
|
[
"MIT"
] | null | null | null |
import logging
import logging.config
import os
import threading
import yaml
def synchronized(func):
func.__lock__ = threading.Lock()
def lock_func(*args, **kwargs):
with func.__lock__:
return func(*args, **kwargs)
return lock_func
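# Usage sketch (hypothetical function name): decorating with @synchronized
# makes every call acquire func.__lock__ first, i.e.
#
#     @synchronized
#     def write_once(msg):
#         ...
#
# so only one thread executes the body at a time.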
class LogSingleton(object):
instance = None
def __init__(self):
LogSingleton.setup_logging()
self.logger = logging.getLogger("logger")
def info(self, msg: str):
self.logger.info(msg)
def warning(self, msg: str):
self.logger.warning(msg)
def error(self, msg: str):
self.logger.error(msg)
def critical(self, msg: str):
self.logger.critical(msg)
@staticmethod
def setup_logging(default_path="data/log.yaml",
default_level=logging.INFO,
env_key="LOG_CFG"):
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
logging.config.dictConfig(config)
@synchronized
def __new__(cls, *args, **kwargs):
"""
:type kwargs: object
"""
if cls.instance is None:
cls.instance = super().__new__(cls)
return cls.instance
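# Note: because __new__ always hands back the same instance, LogSingleton() can
# be called repeatedly (as RuntimeException.__write_log does) without creating
# duplicate logger objects.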
class RuntimeException(Exception):
def __init__(self,
code=-1,
message="unknown error",
description="unknown error"):
self.code_ = code
self.message_ = message
self.description_ = description
self.__write_log()
def __repr__(self):
text = f">>{self.code_} >>{self.message_} >>{self.description_}"
return text
def __str__(self):
text = f">>{self.code_} >>{self.message_} >>{self.description_}"
return text
def __write_log(self):
text = f">>{self.code_} >>{self.message_} >>{self.description_}"
logger = LogSingleton()
logger.error(text)
if __name__ == "__main__":
try:
raise RuntimeException()
except RuntimeException as e:
print(e)
| 24.550562
| 72
| 0.577574
|
12f7a3ce5186ac63f9cb24543fa28cbce8a64e51
| 12,920
|
py
|
Python
|
mars/scheduler/assigner.py
|
wdkwyf/mars
|
3f750e360e64380eab779301a5103994d4886b6a
|
[
"Apache-2.0"
] | null | null | null |
mars/scheduler/assigner.py
|
wdkwyf/mars
|
3f750e360e64380eab779301a5103994d4886b6a
|
[
"Apache-2.0"
] | null | null | null |
mars/scheduler/assigner.py
|
wdkwyf/mars
|
3f750e360e64380eab779301a5103994d4886b6a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import heapq
import logging
import os
import random
import sys
import time
from collections import defaultdict
from .. import promise
from ..config import options
from ..errors import DependencyMissing
from ..utils import log_unhandled
from .resource import ResourceActor
from .utils import SchedulerActor
logger = logging.getLogger(__name__)
class ChunkPriorityItem(object):
"""
Class providing an order for operands for assignment
"""
def __init__(self, session_id, op_key, op_info, callback):
self._op_key = op_key
self._session_id = session_id
self._op_info = op_info
self._target_worker = op_info.get('target_worker')
self._callback = callback
self._priority = ()
self.update_priority(op_info['optimize'])
def update_priority(self, priority_data, copyobj=False):
obj = self
if copyobj:
obj = copy.deepcopy(obj)
priorities = []
priorities.extend([
priority_data.get('depth', 0),
priority_data.get('demand_depths', ()),
-priority_data.get('successor_size', 0),
-priority_data.get('placement_order', 0),
priority_data.get('descendant_size'),
])
obj._priority = tuple(priorities)
return obj
@property
def session_id(self):
return self._session_id
@property
def op_key(self):
return self._op_key
@property
def target_worker(self):
return self._target_worker
@target_worker.setter
def target_worker(self, value):
self._target_worker = value
@property
def callback(self):
return self._callback
@property
def op_info(self):
return self._op_info
def __repr__(self):
return '<ChunkPriorityItem(%s(%s))>' % (self.op_key, self.op_info['op_name'])
def __lt__(self, other):
return self._priority > other._priority
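        # Note: the comparison is deliberately inverted so that heapq, a
        # min-heap, pops the item with the *largest* priority tuple first.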
class AssignerActor(SchedulerActor):
"""
Actor handling worker assignment requests from operands.
Note that this actor does not assign workers itself.
"""
@staticmethod
def gen_uid(session_id):
return 's:h1:assigner$%s' % session_id
def __init__(self):
super(AssignerActor, self).__init__()
self._requests = dict()
self._req_heap = []
self._cluster_info_ref = None
self._actual_ref = None
self._resource_ref = None
self._worker_metrics = None
        # since worker metrics do not change frequently, we refresh them
        # only when they are out of date
self._worker_metric_time = 0
def post_create(self):
logger.debug('Actor %s running in process %d', self.uid, os.getpid())
self.set_cluster_info_ref()
# the ref of the actor actually handling assignment work
session_id = self.uid.rsplit('$', 1)[-1]
self._actual_ref = self.ctx.create_actor(AssignEvaluationActor, self.ref(),
uid=AssignEvaluationActor.gen_uid(session_id))
self._resource_ref = self.get_actor_ref(ResourceActor.default_uid())
def pre_destroy(self):
self._actual_ref.destroy()
def allocate_top_resources(self):
self._actual_ref.allocate_top_resources(_tell=True)
def mark_metrics_expired(self):
logger.debug('Metrics cache marked as expired.')
self._worker_metric_time = 0
self._actual_ref.mark_metrics_expired(_tell=True)
def _refresh_worker_metrics(self):
t = time.time()
if self._worker_metrics is None or self._worker_metric_time + 1 < time.time():
# update worker metrics from ResourceActor
self._worker_metrics = self._resource_ref.get_workers_meta()
self._worker_metric_time = t
def filter_alive_workers(self, workers, refresh=False):
if refresh:
self._refresh_worker_metrics()
return [w for w in workers if w in self._worker_metrics] if self._worker_metrics else []
@promise.reject_on_exception
@log_unhandled
def apply_for_resource(self, session_id, op_key, op_info, callback=None):
"""
Register resource request for an operand
:param session_id: session id
:param op_key: operand key
:param op_info: operand information, should be a dict
:param callback: promise callback, called when the resource is assigned
"""
self._refresh_worker_metrics()
priority_item = ChunkPriorityItem(session_id, op_key, op_info, callback)
if priority_item.target_worker not in self._worker_metrics:
priority_item.target_worker = None
self._requests[op_key] = priority_item
heapq.heappush(self._req_heap, priority_item)
self._actual_ref.allocate_top_resources(_tell=True)
@log_unhandled
def update_priority(self, op_key, priority_data):
"""
Update priority data for an operand. The priority item will be
pushed into priority queue again.
:param op_key: operand key
:param priority_data: new priority data
"""
if op_key not in self._requests:
return
obj = self._requests[op_key].update_priority(priority_data, copyobj=True)
heapq.heappush(self._req_heap, obj)
@log_unhandled
def remove_apply(self, op_key):
"""
Cancel request for an operand
:param op_key: operand key
"""
if op_key in self._requests:
del self._requests[op_key]
def pop_head(self):
"""
Pop and obtain top-priority request from queue
:return: top item
"""
item = None
while self._req_heap:
item = heapq.heappop(self._req_heap)
if item.op_key in self._requests:
# use latest request item
item = self._requests[item.op_key]
break
else:
item = None
return item
def extend(self, items):
"""
Extend heap by an iterable object. The heap will be reheapified.
:param items: priority items
"""
self._req_heap.extend(items)
heapq.heapify(self._req_heap)
class AssignEvaluationActor(SchedulerActor):
"""
Actor assigning operands to workers
"""
@classmethod
def gen_uid(cls, session_id):
return 's:0:%s$%s' % (cls.__name__, session_id)
def __init__(self, assigner_ref):
super(AssignEvaluationActor, self).__init__()
self._worker_metrics = None
self._worker_metric_time = time.time() - 2
self._cluster_info_ref = None
self._assigner_ref = assigner_ref
self._resource_ref = None
self._sufficient_operands = set()
self._operand_sufficient_time = dict()
def post_create(self):
logger.debug('Actor %s running in process %d', self.uid, os.getpid())
self.set_cluster_info_ref()
self._assigner_ref = self.ctx.actor_ref(self._assigner_ref)
self._resource_ref = self.get_actor_ref(ResourceActor.default_uid())
self.periodical_allocate()
def mark_metrics_expired(self):
logger.debug('Metrics cache marked as expired.')
self._worker_metric_time = 0
def periodical_allocate(self):
self.allocate_top_resources()
self.ref().periodical_allocate(_tell=True, _delay=0.5)
def allocate_top_resources(self):
"""
Allocate resources given the order in AssignerActor
"""
t = time.time()
if self._worker_metrics is None or self._worker_metric_time + 1 < time.time():
# update worker metrics from ResourceActor
self._worker_metrics = self._resource_ref.get_workers_meta()
self._worker_metric_time = t
if not self._worker_metrics:
return
unassigned = []
reject_workers = set()
        # keep assigning until every known worker has rejected an operand
        # or the request queue is exhausted
while len(reject_workers) < len(self._worker_metrics):
item = self._assigner_ref.pop_head()
if not item:
break
try:
alloc_ep, rejects = self._allocate_resource(
item.session_id, item.op_key, item.op_info, item.target_worker,
reject_workers=reject_workers, callback=item.callback)
except: # noqa: E722
logger.exception('Unexpected error occurred in %s', self.uid)
self.tell_promise(item.callback, *sys.exc_info(), **dict(_accept=False))
continue
# collect workers failed to assign operand to
reject_workers.update(rejects)
if alloc_ep:
# assign successfully, we remove the application
self._assigner_ref.remove_apply(item.op_key)
else:
# put the unassigned item into unassigned list to add back to the queue later
unassigned.append(item)
if unassigned:
# put unassigned back to the queue, if any
self._assigner_ref.extend(unassigned)
@log_unhandled
def _allocate_resource(self, session_id, op_key, op_info, target_worker=None, reject_workers=None, callback=None):
"""
Allocate resource for single operand
:param session_id: session id
:param op_key: operand key
:param op_info: operand info dict
:param target_worker: worker to allocate, can be None
:param reject_workers: workers denied to assign to
:param callback: promise callback from operands
"""
if target_worker not in self._worker_metrics:
target_worker = None
reject_workers = reject_workers or set()
op_io_meta = op_info['io_meta']
try:
input_metas = op_io_meta['input_data_metas']
input_data_keys = list(input_metas.keys())
input_sizes = dict((k, v.chunk_size) for k, v in input_metas.items())
except KeyError:
input_data_keys = op_io_meta['input_chunks']
input_metas = self._get_chunks_meta(session_id, input_data_keys)
if any(m is None for m in input_metas.values()):
raise DependencyMissing('Dependency missing for operand %s' % op_key)
input_sizes = dict((k, meta.chunk_size) for k, meta in input_metas.items())
if target_worker is None:
who_has = dict((k, meta.workers) for k, meta in input_metas.items())
candidate_workers = self._get_eps_by_worker_locality(input_data_keys, who_has, input_sizes)
else:
candidate_workers = [target_worker]
candidate_workers = [w for w in candidate_workers if w not in reject_workers]
if not candidate_workers:
return None, []
# todo make more detailed allocation plans
alloc_dict = dict(cpu=options.scheduler.default_cpu_usage, memory=sum(input_sizes.values()))
rejects = []
for worker_ep in candidate_workers:
if self._resource_ref.allocate_resource(session_id, op_key, worker_ep, alloc_dict):
logger.debug('Operand %s(%s) allocated to run in %s', op_key, op_info['op_name'], worker_ep)
self.tell_promise(callback, worker_ep, input_sizes)
return worker_ep, rejects
rejects.append(worker_ep)
return None, rejects
def _get_chunks_meta(self, session_id, keys):
return dict(zip(keys, self.chunk_meta.batch_get_chunk_meta(session_id, keys)))
def _get_eps_by_worker_locality(self, input_keys, chunk_workers, input_sizes):
locality_data = defaultdict(lambda: 0)
for k in input_keys:
if k in chunk_workers:
for ep in chunk_workers[k]:
locality_data[ep] += input_sizes[k]
workers = list(self._worker_metrics.keys())
random.shuffle(workers)
max_locality = -1
max_eps = []
for ep in workers:
if locality_data[ep] > max_locality:
max_locality = locality_data[ep]
max_eps = [ep]
elif locality_data[ep] == max_locality:
max_eps.append(ep)
return max_eps
| 35.20436
| 118
| 0.641486
|
121cd448f86b4f1d0e8b5865b5282f4a2fbeb2d1
| 10,789
|
py
|
Python
|
l5kit/l5kit/tests/evaluation/metrics_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | 1
|
2021-12-04T17:48:53.000Z
|
2021-12-04T17:48:53.000Z
|
l5kit/l5kit/tests/evaluation/metrics_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | null | null | null |
l5kit/l5kit/tests/evaluation/metrics_test.py
|
cdicle-motional/l5kit
|
4dc4ee5391479bb71f0b373f39c316f9eef5a961
|
[
"Apache-2.0"
] | 1
|
2021-11-19T08:13:46.000Z
|
2021-11-19T08:13:46.000Z
|
import numpy as np
import pytest
import torch
from l5kit.evaluation.metrics import (_assert_shapes, average_displacement_error_mean,
average_displacement_error_oracle, CollisionType, detect_collision,
distance_to_reference_trajectory, final_displacement_error_mean,
final_displacement_error_oracle, neg_multi_log_likelihood, prob_true_mode, rmse,
time_displace)
from l5kit.planning.utils import _get_bounding_box, _get_sides, within_range
def test_assert_shapes() -> None:
num_modes, future_len, num_coords = 4, 12, 2
gt = np.random.randn(future_len, num_coords)
pred = np.random.randn(num_modes, future_len, num_coords)
avail = np.ones(future_len)
conf = np.random.rand(num_modes)
conf /= np.sum(conf, axis=-1, keepdims=True)
# test un-normalised conf
with pytest.raises(AssertionError):
conf_un = np.random.rand(4)
_assert_shapes(gt, pred, conf_un, avail)
# test single pred with no axis
with pytest.raises(AssertionError):
_assert_shapes(gt, pred[0], conf, avail)
# NLL shape must be ()
assert neg_multi_log_likelihood(gt, pred, conf, avail).shape == ()
# RMSE shape must be ()
assert rmse(gt, pred, conf, avail).shape == ()
# prob_true_mode shape must be (M)
assert prob_true_mode(gt, pred, conf, avail).shape == (num_modes,)
# displace_t shape must be (T)
assert time_displace(gt, pred, conf, avail).shape == (future_len,)
def test_neg_multi_log_likelihood_known_results() -> None:
    # below M=2, T=3, C=1 and avail=[1,1,1]
num_modes, future_len, num_coords = 2, 3, 1
avail = np.ones(future_len)
gt = np.zeros((future_len, num_coords))
pred = np.zeros((num_modes, future_len, num_coords))
pred[0] = [[0], [0], [0]]
pred[1] = [[10], [10], [10]]
# single mode, one 100% right
confs = np.asarray((1, 0))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 0)
assert np.allclose(rmse(gt, pred, confs, avail), 0)
# two equal modes, one 100% right
confs = np.asarray((0.5, 0.5))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 0.69314, atol=1e-4)
assert np.allclose(rmse(gt, pred, confs, avail), np.sqrt(2 * 0.69314 / future_len), atol=1e-4)
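    # (0.69314 is ln(2): splitting confidence over two modes when one of them
    # matches the ground truth exactly costs exactly -log(0.5).)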
# two equal modes, answer in between
gt = np.full((future_len, num_coords), 5)
confs = np.asarray((0.5, 0.5))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 37.5, atol=1e-4)
assert np.allclose(rmse(gt, pred, confs, avail), np.sqrt(2 * 37.5 / future_len), atol=1e-4)
# two modes, one 50% right = answer in between
gt = np.full((future_len, num_coords), 5)
confs = np.asarray((1, 0))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 37.5, atol=1e-4)
assert np.allclose(rmse(gt, pred, confs, avail), np.sqrt(2 * 37.5 / future_len), atol=1e-4)
# Example 5
gt = np.zeros((future_len, num_coords))
gt[1, 0] = 10
confs = np.asarray((1, 0))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 50, atol=1e-4)
assert np.allclose(rmse(gt, pred, confs, avail), np.sqrt(2 * 50 / future_len), atol=1e-4)
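    # Worked arithmetic for Example 5: with conf=(1, 0) the NLL reduces to
    # 0.5 * (sum of squared errors) = 0.5 * 10**2 = 50, matching the assertion.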
# Example 6
gt = np.zeros((future_len, num_coords))
gt[1, 0] = 10
confs = np.asarray((0.5, 0.5))
assert np.allclose(neg_multi_log_likelihood(gt, pred, confs, avail), 50.6931, atol=1e-4)
assert np.allclose(rmse(gt, pred, confs, avail), np.sqrt(2 * 50.6931 / future_len), atol=1e-4)
# Test overflow resistance in two situations
confs = np.asarray((0.5, 0.5))
pred[0] = [[1000], [1000], [1000]]
pred[1] = [[1000], [1000], [1000]]
gt = np.zeros((future_len, num_coords))
assert not np.isinf(neg_multi_log_likelihood(gt, pred, confs, avail))
assert not np.isinf(rmse(gt, pred, confs, avail))
    # this would also break a max-based version if the confidence were not
    # folded into the exponent
confs = np.asarray((1.0, 0.0))
pred[0] = [[100000], [1000], [1000]]
pred[1] = [[1000], [1000], [1000]]
gt = np.zeros((future_len, num_coords))
assert not np.isinf(neg_multi_log_likelihood(gt, pred, confs, avail))
assert not np.isinf(rmse(gt, pred, confs, avail))
def test_other_metrics_known_results() -> None:
gt = np.asarray([[50, 0], [50, 0], [50, 0]])
avail = np.ones(3)
pred = np.asarray([[[50, 0], [50, 0], [50, 0]], [[100, 100], [100, 100], [100, 100]]])
confs = np.asarray((0.5, 0.5))
assert np.allclose(prob_true_mode(gt, pred, confs, avail), (1.0, 0.0))
assert np.allclose(time_displace(gt, pred, confs, avail), (0.0, 0.0, 0.0))
pred = np.asarray([[[52, 0], [52, 0], [52, 0]], [[49, 0], [51, 0], [50, 2]]])
confs = np.asarray((0.1, 0.9))
assert np.allclose(prob_true_mode(gt, pred, confs, avail), (0.0055, 0.9944), atol=1e-4)
assert np.allclose(time_displace(gt, pred, confs, avail), (1.0055, 1.0055, 2.0), atol=1e-4)
def test_ade_fde_known_results() -> None:
    # below M=2, T=3, C=1 and avail=[1,1,1]
num_modes, future_len, num_coords = 2, 3, 1
avail = np.ones(future_len)
gt = np.zeros((future_len, num_coords))
pred = np.zeros((num_modes, future_len, num_coords))
pred[0] = [[0], [0], [0]]
pred[1] = [[1], [2], [3]]
# Confidences do not matter here.
confs = np.asarray((0.5, 0.5))
assert np.allclose(average_displacement_error_mean(gt, pred, confs, avail), 1)
assert np.allclose(average_displacement_error_oracle(gt, pred, confs, avail), 0)
assert np.allclose(final_displacement_error_mean(gt, pred, confs, avail), 1.5)
assert np.allclose(final_displacement_error_oracle(gt, pred, confs, avail), 0)
gt = np.full((future_len, num_coords), 0.5)
assert np.allclose(average_displacement_error_mean(gt, pred, confs, avail), 1.0)
assert np.allclose(average_displacement_error_oracle(gt, pred, confs, avail), 0.5)
assert np.allclose(final_displacement_error_mean(gt, pred, confs, avail), 1.5)
assert np.allclose(final_displacement_error_oracle(gt, pred, confs, avail), 0.5)
def test_within_range() -> None:
# Fixture: overlapping ego and agent
ego_centroid = np.array([[10., 10.]])
ego_extent = np.array([[5.0, 2.0, 2.0]])
agent_centroid = np.array([[10.0, 10.0]])
agent_extent = np.array([[5., 2., 2.]])
    # Overlapping ego and agent should be within range
assert within_range(ego_centroid, ego_extent,
agent_centroid, agent_extent)
    # The converse is also true
assert within_range(agent_centroid, agent_extent,
ego_centroid, ego_extent)
# Agent is far from the ego, not within range
assert not within_range(ego_centroid, ego_extent,
agent_centroid + 1000.0, agent_extent)
# Repeat dimension (10, D)
num_repeat = 10
ego_centroid = ego_centroid.repeat(num_repeat, axis=0)
ego_extent = ego_extent.repeat(num_repeat, axis=0)
agent_centroid = agent_centroid.repeat(num_repeat, axis=0)
agent_extent = agent_extent.repeat(num_repeat, axis=0)
truth_value = within_range(ego_centroid, ego_extent,
agent_centroid, agent_extent)
assert len(truth_value) == num_repeat
assert truth_value.all()
# Only half not within range
ego_centroid[5:, :] += 1000.0
truth_value = within_range(ego_centroid, ego_extent,
agent_centroid, agent_extent)
assert len(truth_value) == num_repeat
assert np.count_nonzero(truth_value) == 5
def test_get_bounding_box() -> None:
agent_centroid = np.array([10., 10.])
agent_extent = np.array([5.0, 2.0, 2.0])
agent_yaw = 1.0
bbox = _get_bounding_box(agent_centroid, agent_yaw, agent_extent)
    # Check centroid coordinates and other properties of the polygon
assert np.allclose(bbox.centroid.coords, agent_centroid)
assert np.allclose(bbox.area, 10.0)
assert not bbox.is_empty
assert bbox.is_valid
def test_get_sides() -> None:
agent_centroid = np.array([10., 10.])
agent_extent = np.array([5.0, 2.0, 2.0])
agent_yaw = 1.0
bbox = _get_bounding_box(agent_centroid, agent_yaw, agent_extent)
front, rear, left, right = _get_sides(bbox)
    # The parallel offset of the front side should match the rear side
coords_parallel_s1 = np.array(front.parallel_offset(agent_extent[0], 'right').coords)
coords_s2 = np.array(rear.coords)
assert np.allclose(coords_s2, coords_parallel_s1)
# One side shouldn't touch the other parallel side
assert not front.touches(rear)
    # ...but it should touch the orthogonal sides
assert front.touches(left)
assert front.touches(right)
assert np.allclose(left.length, agent_extent[0])
assert np.allclose(right.length, agent_extent[0])
assert np.allclose(front.length, agent_extent[1])
assert np.allclose(rear.length, agent_extent[1])
def test_detect_collision() -> None:
pred_centroid = np.array([0.0, 0.0])
pred_yaw = np.array([1.0])
pred_extent = np.array([5., 2., 2.])
target_agents_dtype = np.dtype([('centroid', '<f8', (2,)),
('extent', '<f4', (3,)),
('yaw', '<f4'), ('track_id', '<u8')])
target_agents = np.array([
([1000.0, 1000.0], [5., 2., 2.], 1.0, 1), # Not in range
([0., 0.], [5., 2., 2.], 1.0, 2), # In range
([0., 0.], [5., 2., 2.], 1.0, 3), # In range
], dtype=target_agents_dtype)
collision = detect_collision(pred_centroid, pred_yaw, pred_extent, target_agents)
assert collision == (CollisionType.SIDE, 2)
target_agents = np.array([
([1000.0, 1000.0], [5., 2., 2.], 1.0, 1),
], dtype=target_agents_dtype)
collision = detect_collision(pred_centroid, pred_yaw, pred_extent, target_agents)
assert collision is None
@pytest.mark.parametrize("device", ["cpu", "cuda"])
def test_distance_to_reference_trajectory(device: str) -> None:
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("requires CUDA")
# [batch_size, 2]
pred_centroid = torch.tensor([[1, 0], [1, 1], [1.5, 2]], dtype=torch.float32, device=device)
# [batch_size, num_timestamps, 2]
ref_traj = torch.tensor([[[0, 0], [1, 0], [2, 0], [3, 0]],
[[0, 0], [1, 0], [2, 0], [3, 0]],
[[0, 3], [1, 3], [2, 3], [3, 3]]], dtype=torch.float32, device=device)
# [batch_size,]
distance = distance_to_reference_trajectory(pred_centroid, ref_traj)
expected_distance = torch.tensor([0, 1, 1.11803], dtype=torch.float32, device=device)
assert torch.allclose(distance, expected_distance, atol=1e-4)
| 39.811808
| 118
| 0.640467
|
508c810ab44598fb3c1164e772a97e5433629d51
| 2,612
|
py
|
Python
|
subastas_repo/personas/views.py
|
diegoduncan21/subastas
|
3bae0c9bbb90ad93688b91d6e86bcc64215c0125
|
[
"BSD-3-Clause"
] | null | null | null |
subastas_repo/personas/views.py
|
diegoduncan21/subastas
|
3bae0c9bbb90ad93688b91d6e86bcc64215c0125
|
[
"BSD-3-Clause"
] | null | null | null |
subastas_repo/personas/views.py
|
diegoduncan21/subastas
|
3bae0c9bbb90ad93688b91d6e86bcc64215c0125
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.views.generic import CreateView
from django.views.generic import DeleteView
from django.views.generic import ListView
from django.views.generic import UpdateView
from django.shortcuts import get_object_or_404, render, redirect
from braces.views import LoginRequiredMixin
from personas.forms import ProfesionalForm
from subastas.forms import InscriptionForm
from subastas.models import Subasta
from .models import Persona, Profesional
@login_required
def asociar(request, subasta_id):
subasta = get_object_or_404(Subasta, pk=subasta_id)
if request.method == "POST":
form = InscriptionForm(request.POST, instance=subasta)
if form.is_valid():
personas = form.cleaned_data.get('personas')
subasta.personas.add(*personas)
msg = 'Usuarios agregados exitosamente.'
else:
msg = 'No tiene personas tildadas.'
messages.add_message(request,
messages.INFO,
msg)
return redirect(reverse_lazy('subastas:acreditadores')+'?tab=search')
@login_required
def desasociar(request, subasta_id, persona_id):
subasta = get_object_or_404(Subasta, pk=subasta_id)
persona = get_object_or_404(Persona, pk=persona_id)
subasta.personas.remove(persona)
messages.add_message(request,
messages.INFO,
'Se borro la inscripcion exitosamente.')
return redirect(reverse_lazy('subastas:acreditadores')+'?tab=search')
class ProfesionalListView(LoginRequiredMixin, ListView):
model = Profesional
template_name = 'personas/profesionales/list.html'
def get_queryset(self):
return Profesional.objects.all().order_by('titulo')
class ProfesionalCreateView(LoginRequiredMixin, CreateView):
form_class = ProfesionalForm
model = Profesional
template_name = 'personas/profesionales/form.html'
success_url = reverse_lazy('personas:profesionales_list')
class ProfesionalUpdateView(LoginRequiredMixin, UpdateView):
context_object_name = 'instance'
form_class = ProfesionalForm
model = Profesional
template_name = 'personas/profesionales/form.html'
success_url = reverse_lazy('personas:profesionales_list')
class ProfesionalDeleteView(LoginRequiredMixin, DeleteView):
model = Profesional
template_name = 'personas/profesionales/confirm_delete.html'
success_url = reverse_lazy('personas:profesionales_list')
| 35.780822
| 73
| 0.737366
|
11ab84b872f8ffdb6b635eb6f4fbd6192888e39b
| 228
|
py
|
Python
|
chart/models.py
|
Dev-Briann/tutorials
|
01faee527b869d7b3ca4393f194fd715e9325d10
|
[
"MIT"
] | null | null | null |
chart/models.py
|
Dev-Briann/tutorials
|
01faee527b869d7b3ca4393f194fd715e9325d10
|
[
"MIT"
] | null | null | null |
chart/models.py
|
Dev-Briann/tutorials
|
01faee527b869d7b3ca4393f194fd715e9325d10
|
[
"MIT"
] | null | null | null |
from django.db import models
class SalesRecords(models.Model):
month = models.IntegerField()
sales = models.DecimalField(max_digits=5, decimal_places=1)
expenses = models.DecimalField(max_digits=5, decimal_places=1)
| 38
| 66
| 0.77193
|
83f39f1971833d9baede06e20e95a4f7bb985a47
| 11,793
|
py
|
Python
|
dithermaker.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 1
|
2019-03-15T04:01:19.000Z
|
2019-03-15T04:01:19.000Z
|
dithermaker.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 5
|
2017-12-11T00:11:39.000Z
|
2021-07-09T17:05:16.000Z
|
dithermaker.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 2
|
2017-08-15T21:19:11.000Z
|
2017-10-12T00:36:35.000Z
|
#!/usr/bin/env python
#######################
# Script to take center RA/DEC location & generate dithered offsets
# for baseline project exposures
########################
### History:
## 2014/07/21 rgm: take out "coordinates": "absolute",
## which we are told is invalid.
## 2014/07/25 rgm: fix --disableoutrigger; add argv to end of config
import sys, argparse, random, numpy as np
import json, math
########################
def generateMainDithers(nexp, dithersize):
if nexp == 0:
return []
exp_offsets = [(0., 0.)]
if nexp == 1:
return exp_offsets
OneOffsetPositive = True
OneOffsetCount = 0
IndexPositive = True
IndexCount = 1
IndexOffsetLeft = True
for i in range(1, nexp):
IndexOffset = i
if not IndexPositive:
IndexOffset = -i
OneOffset = 1
if not OneOffsetPositive:
OneOffset = -1
if IndexOffsetLeft:
exp_offsets.append((IndexOffset*dithersize, OneOffset*dithersize))
else:
exp_offsets.append((OneOffset*dithersize, IndexOffset*dithersize))
IndexOffsetLeft = not IndexOffsetLeft
OneOffsetCount += 1
if OneOffsetCount == 2:
OneOffsetPositive = not OneOffsetPositive
OneOffsetCount = 0
IndexCount += 1
if IndexCount == 2:
IndexPositive = not IndexPositive
IndexCount = 0
return exp_offsets
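    # Hand-traced example (illustration only): nexp=3, dithersize=60.0 returns
    # [(0.0, 0.0), (60.0, 60.0), (60.0, -120.0)] -- relative offsets in arcsec
    # that absolutePointings() below accumulates into absolute RA/DEC pointings.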
########################
def absolutePointings(ra, dec, exp_offsets):
cum_offsets = []
ra = ra
dec = dec
for offset in exp_offsets:
ra += (offset[0]/3600.)/np.cos(dec*np.pi/180.)
dec += (offset[1]/3600.)
cum_offsets.append((ra, dec))
return cum_offsets
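    # Note: offsets are in arcseconds; the RA step is divided by cos(dec) so
    # the on-sky separation stays roughly constant as declination changes.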
########################
def generateOutriggers(ra, dec, outriggersize):
exp_positions = []
posangle = random.uniform(0, 360)
for i in range(3):
angle = (posangle + i*(360/3.))*(np.pi/180.)
exp_positions.append( (ra + outriggersize*np.cos(angle)/np.cos(dec*np.pi/180.),
dec + outriggersize*np.sin(angle)) )
return exp_positions
########################
def writeExposures(output, exposures):
json_string = json.dumps(exposures, sort_keys = True, indent=4)
output.write(json_string)
#######################
def countTime(exposures, overhead):
nexp = len(exposures)
if nexp == 0:
return 0, 0
imagetime = reduce(lambda x,y:x+y, [x['expTime'] for x in exposures if 'break' not in x])
totaltime = imagetime + nexp*overhead
return imagetime, totaltime
########################
def createScript(args):
output = open(args.scriptname, 'w')
### deal with stuff always at the start of the exposure sequence
start_exposures = []
if args.breakstart:
breakKeys = {'break' : True}
start_exposures.append(breakKeys)
#short test exposure
if not args.noshortexp:
seqid = '%s_%s_%d_short' % (args.object, args.filter, args.visit)
keywords = {'expType' : 'object', # 'coordinates' : 'absolute',
'RA' : args.ra, 'dec' : args.dec,
'filter' : args.filter, 'object' : args.object,
'expTime' : 10,
'seqid' : seqid, 'seqnum' : 0, 'seqtot' : 2}
start_exposures.append(keywords)
exptime = math.ceil(10**((np.log10(10.) + \
np.log10(args.singletime))/2.))
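        # i.e. the geometric mean of the 10 s test exposure and the full
        # single-exposure time, rounded up to a whole second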
keywords = {'expType' : 'object', # 'coordinates' : 'absolute',
'RA' : args.ra, 'dec' : args.dec,
'filter' : args.filter, 'object' : args.object,
'expTime' : exptime,
'seqid' : seqid, 'seqnum' : 0, 'seqtot' : 2}
start_exposures.append(keywords)
breakKeys = {'break' : True}
start_exposures.append(breakKeys)
#deal with dithers
seqid = '%s_%s_%d_dither' % (args.object, args.filter, args.visit)
nexp = args.nexp
science_exposures = []
exposure_offsets = generateMainDithers(nexp = nexp, dithersize=args.dithersize)
abs_pointings = absolutePointings(args.ra, args.dec, exposure_offsets)
expIDoffset = args.startwithexpnum
for seqnum, pointing in enumerate(abs_pointings[expIDoffset:]):
# keywords = {'expType' : 'object', 'coordinates' : 'absolute', 'RA' : pointing[0], 'DEC' : pointing[1],
keywords = {'expType' : 'object', 'RA' : pointing[0], 'DEC' : pointing[1],
'filter' : args.filter, 'object' : args.object, 'expTime' : args.singletime,
'seqid' : seqid, 'seqnum' : expIDoffset+seqnum, 'seqtot' : nexp}
science_exposures.append(keywords)
#deal with outriggers
outrigger_exposures = []
if not args.disableoutrigger:
outrigger_positions = generateOutriggers(ra = args.ra, dec = args.dec, outriggersize = args.outriggersize)
# make sure we grab the central exposure if we aren't taking science images
outriggerid = '%s_%s_%d_outrigger' % (args.object, args.filter, args.visit)
outriggernum = 3
if args.nexp == 0:
outrigger_positions.insert(0,(args.ra, args.dec))
outriggernum = 4
for seqnum, exp_pos in enumerate(outrigger_positions):
# keywords = {'expType' : 'object', 'coordinates' : 'absolute',
keywords = {'expType' : 'object',
'RA' : exp_pos[0], 'DEC' : exp_pos[1],
'filter' : args.filter, 'object' : args.object, 'expTime' : args.outriggertime,
'seqid' : outriggerid, 'seqnum' : seqnum, 'seqtot' : outriggernum}
outrigger_exposures.append(keywords)
if args.outriggerfirst:
exposures = start_exposures + outrigger_exposures + science_exposures
else:
exposures = start_exposures + science_exposures + outrigger_exposures
if not args.nobreakend:
breakKeys = {'break' : True}
exposures.append(breakKeys)
writeExposures(output, exposures)
output.close()
calibImage, calibTotal = countTime(start_exposures + outrigger_exposures, args.overhead)
sciImage, sciTotal = countTime(science_exposures, args.overhead)
return sciImage, calibImage, sciTotal + calibTotal
########################
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--ra', type=float)
parser.add_argument('--dec', type=float)
parser.add_argument('--nexp', type=int,
help = 'Total number of full-length science exposures')
parser.add_argument('--singletime', type=int,
help = 'Exposure time per image')
parser.add_argument('--dithersize', type=float, default=60.0,
help = 'Basic unit size for dither; arcseconds')
parser.add_argument('--disableoutrigger', default=False, action='store_true')
parser.add_argument('--outriggersize', type=float, default = 0.5,
help = 'Step size from center for outrigger exp; degrees')
parser.add_argument('--outriggertime', type=int, default=-1,
help = 'Exposure time for each outrigger')
parser.add_argument('--outriggerfirst', default=False, action='store_true')
parser.add_argument('--filter', type=str)
parser.add_argument('--object', type=str)
parser.add_argument('--visit', type=int, default=0)
parser.add_argument('--breakstart', default=False, action='store_true')
parser.add_argument('--nobreakend', default=False, action='store_true')
parser.add_argument('--startwithexpnum', type=int, default = 0, help='Start at a different initial dither; 0-indexed')
parser.add_argument('--noshortexp', default=False, action='store_true')
parser.add_argument('--overhead', type=int, default = 30)
parser.add_argument('--offset', type=float, default = -4.5,
help = 'Offset in DEC direction to place center of cluster in the middle of a chip (in arcmin)')
parser.add_argument('--scriptname', type=str, default='')
args = parser.parse_args(argv)
args.offset = args.offset/60.
args.dec = args.dec + args.offset
if args.outriggertime == -1:
args.outriggertime = args.singletime / 2.
outriggerFlag = 1
if args.disableoutrigger:
outriggerFlag = 0
if args.scriptname == '':
args.scriptname = '%s_%s_v%d_sci%d-%d_out%d.script' % (args.object, args.filter, args.visit, args.startwithexpnum, args.nexp, outriggerFlag)
configfile = '%s.config' % args.scriptname
print 'Called with configuration:'
print 'RA: %f' % args.ra
print 'DEC: %f' % (args.dec - args.offset)
print 'DEC Offset: %f arcmin' % (60*args.offset)
print 'Number of Science Exposures: %d' % args.nexp
print 'Single Exposure: %d' % args.singletime
print 'Dither Size: %f' % args.dithersize
print 'Disable Outrigger?: %s' % args.disableoutrigger
print 'Outrigger First?: %s' % args.outriggerfirst
print 'Outrigger Size: %f' % args.outriggersize
print 'Outrigger time: %f' % args.outriggertime
print 'Filter: %s' % args.filter
print 'Object: %s' % args.object
print 'Break Start? : %s' % args.breakstart
print 'Break End? : %s' % (not args.nobreakend)
print 'Overhead: %d' % args.overhead
print 'First Exposure : %d' % args.startwithexpnum
print 'Script Name: %s' % args.scriptname
scitime, calibtime, totaltime = createScript(args)
print
print 'Science Time: %d' % scitime
print 'Calib Time: %d' % calibtime
print 'Total Time: %d' % totaltime
with open(configfile, 'w') as output:
output.write('Called with configuration:\n' )
output.write('RA: %f\n' % args.ra )
output.write('DEC: %f\n' % (args.dec - args.offset) )
output.write('DEC Offset: %f arcmin\n' % (60*args.offset))
output.write('Number of Science Exposures: %d\n' % args.nexp )
output.write('Single Exposure: %d\n' % args.singletime )
output.write('Dither Size: %f\n' % args.dithersize )
output.write('Disable Outrigger?: %s\n' % args.disableoutrigger )
output.write('Outrigger First?: %s\n' % args.outriggerfirst )
output.write('Outrigger Size: %f\n' % args.outriggersize )
        output.write('Outrigger Time: %f\n' % args.outriggertime )
output.write('Filter: %s\n' % args.filter )
output.write('Object: %s\n' % args.object )
output.write('Break Start? : %s\n' % args.breakstart )
output.write('Break End? : %s\n' % (not args.nobreakend) )
output.write('Overhead : %d\n' % args.overhead )
output.write('First Exposure : %d\n' % args.startwithexpnum )
output.write('Script Name: %s\n' % args.scriptname )
output.write('Science Time: %d\n' % scitime )
output.write('Calib Time: %d\n' % calibtime )
output.write('Total Time: %d\n' % totaltime )
output.write('Arguments: %s\n' % ' '.join(map(str,sys.argv[1:])))
#########################
if __name__ == '__main__':
main(argv = sys.argv[1:])
| 34.890533
| 149
| 0.572882
|
e3736f9d4e331c23c6f68daeddb8f940fea9e6e4
| 1,270
|
py
|
Python
|
cloudml-samples-master/reddit_tft/path_constants.py
|
liufuyang/kaggle-youtube-8m
|
1cfbbf92ec9b5ead791f98f7a09463ee165a8120
|
[
"MIT"
] | 16
|
2017-04-09T09:51:00.000Z
|
2019-04-29T11:29:32.000Z
|
cloudml-samples-master/reddit_tft/path_constants.py
|
liufuyang/kaggle-youtube-8m
|
1cfbbf92ec9b5ead791f98f7a09463ee165a8120
|
[
"MIT"
] | null | null | null |
cloudml-samples-master/reddit_tft/path_constants.py
|
liufuyang/kaggle-youtube-8m
|
1cfbbf92ec9b5ead791f98f7a09463ee165a8120
|
[
"MIT"
] | 15
|
2017-03-12T18:17:21.000Z
|
2019-07-16T21:52:24.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File paths for the Reddit Classification pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
TEMP_DIR = 'tmp'
TRANSFORM_FN_DIR = 'transform_fn'
RAW_METADATA_DIR = 'raw_metadata'
TRANSFORMED_METADATA_DIR = 'transformed_metadata'
TRANSFORMED_TRAIN_DATA_FILE_PREFIX = 'features_train'
TRANSFORMED_EVAL_DATA_FILE_PREFIX = 'features_eval'
TRANSFORMED_PREDICT_DATA_FILE_PREFIX = 'features_predict'
TRAIN_RESULTS_FILE = 'train_results'
DEPLOY_SAVED_MODEL_DIR = 'saved_model'
MODEL_DIR = 'model_dir'
MODEL_EVALUATIONS_FILE = 'model_evaluations'
BATCH_PREDICTION_RESULTS_FILE = 'batch_prediction_results'
| 37.352941
| 74
| 0.807874
|
23f891cddb45ff35c600092f13e618d6f99f5db0
| 2,063
|
py
|
Python
|
PythonBaseDemo/WINSOCKdemo/15.4/multicast_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/WINSOCKdemo/15.4/multicast_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/WINSOCKdemo/15.4/multicast_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>              #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import time, socket, threading, os
# The local machine's IP address
SENDERIP = '192.168.1.88'
# The local port
SENDERPORT = 30000
# The multicast IP address used by this program
MYGROUP = '230.0.0.1'
# Create a UDP-based socket by specifying the type attribute
s = socket.socket(type=socket.SOCK_DGRAM)
# Bind the socket to the wildcard address 0.0.0.0
s.bind(('0.0.0.0', SENDERPORT)) # ①
# Set the TTL (Time-To-Live) of multicast messages
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 64)
# Allow multiple multicast sockets to reuse the same port
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Join the socket to the multicast group
status = s.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(MYGROUP) + socket.inet_aton(SENDERIP))
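# Illustrative sketch (not part of the original script, and not called anywhere in it):
# the membership request passed to IP_ADD_MEMBERSHIP above is simply the 4-byte group
# address followed by the 4-byte local interface address; struct.pack builds the same
# bytes more explicitly.
def build_membership_request(group_ip, local_ip):
    """Return the 8-byte ip_mreq structure expected by IP_ADD_MEMBERSHIP."""
    import struct
    return struct.pack("4s4s", socket.inet_aton(group_ip), socket.inet_aton(local_ip))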
# Function that reads data from the socket
def read_socket(sock):
    while True:
        data = sock.recv(2048)
        print("Message: ", data.decode('utf-8'))
# Start a thread with read_socket as its target
threading.Thread(target=read_socket, args=(s, )).start()
# Loop: keep reading keyboard input and send it to the socket
while True:
    line = input('')
    # Leave the loop when the user types 'exit'
    if line is None or line == 'exit':
        break
    # Send line to the socket
    s.sendto(line.encode('utf-8'), (MYGROUP, SENDERPORT))
# Kill the process so the reader thread does not keep it alive
os._exit(0)
| 40.45098
| 73
| 0.428017
|
9044a36594457b9784f5addacc13cfaec75800b8
| 8,627
|
py
|
Python
|
lib/dgm.py
|
marcsv87/Deep-PDE-Solvers
|
3322704f0291dc98e5bdf8c81c57a95cbfe70981
|
[
"MIT"
] | 5
|
2021-01-27T03:32:59.000Z
|
2022-03-07T08:34:57.000Z
|
lib/dgm.py
|
msabvid/Deep-PDE-Solvers
|
3322704f0291dc98e5bdf8c81c57a95cbfe70981
|
[
"MIT"
] | null | null | null |
lib/dgm.py
|
msabvid/Deep-PDE-Solvers
|
3322704f0291dc98e5bdf8c81c57a95cbfe70981
|
[
"MIT"
] | 3
|
2021-02-05T04:56:40.000Z
|
2021-11-20T09:01:55.000Z
|
"""
Deep Galerkin Method: https://arxiv.org/abs/1708.07469
"""
import torch
import torch.nn as nn
import numpy as np
import tqdm
from lib.options import BaseOption
class DGM_Layer(nn.Module):
def __init__(self, dim_x, dim_S, activation='Tanh'):
super(DGM_Layer, self).__init__()
if activation == 'ReLU':
self.activation = nn.ReLU()
elif activation == 'Tanh':
self.activation = nn.Tanh()
elif activation == 'Sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'LogSigmoid':
self.activation = nn.LogSigmoid()
else:
raise ValueError("Unknown activation function {}".format(activation))
self.gate_Z = self.layer(dim_x+dim_S, dim_S)
self.gate_G = self.layer(dim_x+dim_S, dim_S)
self.gate_R = self.layer(dim_x+dim_S, dim_S)
self.gate_H = self.layer(dim_x+dim_S, dim_S)
def layer(self, nIn, nOut):
l = nn.Sequential(nn.Linear(nIn, nOut), self.activation)
return l
def forward(self, x, S):
x_S = torch.cat([x,S],1)
Z = self.gate_Z(x_S)
G = self.gate_G(x_S)
R = self.gate_R(x_S)
input_gate_H = torch.cat([x, S*R],1)
H = self.gate_H(input_gate_H)
        output = (1 - G) * H + Z * S
return output
class Net_DGM(nn.Module):
def __init__(self, dim_x, dim_S, activation='Tanh'):
super(Net_DGM, self).__init__()
self.dim = dim_x
if activation == 'ReLU':
self.activation = nn.ReLU()
elif activation == 'Tanh':
self.activation = nn.Tanh()
elif activation == 'Sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'LogSigmoid':
self.activation = nn.LogSigmoid()
else:
raise ValueError("Unknown activation function {}".format(activation))
self.input_layer = nn.Sequential(nn.Linear(dim_x+1, dim_S), self.activation)
self.DGM1 = DGM_Layer(dim_x=dim_x+1, dim_S=dim_S, activation=activation)
self.DGM2 = DGM_Layer(dim_x=dim_x+1, dim_S=dim_S, activation=activation)
self.DGM3 = DGM_Layer(dim_x=dim_x+1, dim_S=dim_S, activation=activation)
self.output_layer = nn.Linear(dim_S, 1)
def forward(self,t,x):
tx = torch.cat([t,x], 1)
S1 = self.input_layer(tx)
S2 = self.DGM1(tx,S1)
S3 = self.DGM2(tx,S2)
S4 = self.DGM3(tx,S3)
output = self.output_layer(S4)
return output
def get_gradient(output, x):
grad = torch.autograd.grad(output, x, grad_outputs=torch.ones_like(output), create_graph=True, retain_graph=True, only_inputs=True)[0]
return grad
def get_laplacian(grad, x):
hess_diag = []
for d in range(x.shape[1]):
v = grad[:,d].view(-1,1)
grad2 = torch.autograd.grad(v,x,grad_outputs=torch.ones_like(v), only_inputs=True, create_graph=True, retain_graph=True)[0]
hess_diag.append(grad2[:,d].view(-1,1))
hess_diag = torch.cat(hess_diag,1)
laplacian = hess_diag.sum(1, keepdim=True)
return laplacian
class PDE_DGM_BlackScholes(nn.Module):
def __init__(self, d: int, hidden_dim: int, mu:float, sigma: float, ts: torch.Tensor=None):
super().__init__()
self.d = d
self.mu = mu
self.sigma = sigma
self.net_dgm = Net_DGM(d, hidden_dim, activation='Tanh')
self.ts = ts
def fit(self, max_updates: int, batch_size: int, option, device):
optimizer = torch.optim.Adam(self.net_dgm.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones = (10000,),gamma=0.1)
loss_fn = nn.MSELoss()
pbar = tqdm.tqdm(total=max_updates)
for it in range(max_updates):
optimizer.zero_grad()
input_domain = 0.5 + 2*torch.rand(batch_size, self.d, device=device, requires_grad=True)
t0, T = self.ts[0], self.ts[-1]
t = t0 + T*torch.rand(batch_size, 1, device=device, requires_grad=True)
u_of_tx = self.net_dgm(t, input_domain)
grad_u_x = get_gradient(u_of_tx,input_domain)
grad_u_t = get_gradient(u_of_tx, t)
laplacian = get_laplacian(grad_u_x, input_domain)
target_functional = torch.zeros_like(u_of_tx)
pde = grad_u_t + torch.sum(self.mu*input_domain.detach()*grad_u_x,1,keepdim=True) + 0.5*self.sigma**2 * laplacian - self.mu * u_of_tx
MSE_functional = loss_fn(pde, target_functional)
input_terminal = 0.5 + 2*torch.rand(batch_size, self.d, device=device, requires_grad=True)
t = torch.ones(batch_size, 1, device=device) * T
u_of_tx = self.net_dgm(t, input_terminal)
target_terminal = option.payoff(input_terminal)
MSE_terminal = loss_fn(u_of_tx, target_terminal)
loss = MSE_functional + MSE_terminal
loss.backward()
optimizer.step()
scheduler.step()
if it%10 == 0:
pbar.update(10)
pbar.write("Iteration: {}/{}\t MSE functional: {:.4f}\t MSE terminal: {:.4f}\t Total Loss: {:.4f}".format(it, max_updates, MSE_functional.item(), MSE_terminal.item(), loss.item()))
def sdeint(self, ts, x0, antithetic = False):
"""
Euler scheme to solve the SDE.
Parameters
----------
        ts: torch.Tensor
timegrid. Vector of length N
x0: torch.Tensor
initial value of SDE. Tensor of shape (batch_size, d)
brownian: Optional.
torch.tensor of shape (batch_size, N, d)
Note
----
I am assuming uncorrelated Brownian motion
"""
if antithetic:
x = torch.cat([x0.unsqueeze(1), x0.unsqueeze(1)], dim=0)
else:
x = x0.unsqueeze(1)
batch_size = x.shape[0]
device = x.device
brownian_increments = torch.zeros(batch_size, len(ts), self.d, device=device)
for idx, t in enumerate(ts[1:]):
h = ts[idx+1]-ts[idx]
if antithetic:
brownian_increments[:batch_size//2,idx,:] = torch.randn(batch_size//2, self.d, device=device)*torch.sqrt(h)
brownian_increments[-batch_size//2:,idx,:] = -brownian_increments[:batch_size//2, idx, :].clone()
else:
brownian_increments[:,idx,:] = torch.randn(batch_size, self.d, device=device)*torch.sqrt(h)
x_new = x[:,-1,:] + self.mu*x[:,-1,:]*h + self.sigma*x[:,-1,:]*brownian_increments[:,idx,:]
x = torch.cat([x, x_new.unsqueeze(1)],1)
return x, brownian_increments
def unbiased_price(self, ts: torch.Tensor, x0:torch.Tensor, option: BaseOption, MC_samples: int, ):
"""
        We calculate an unbiased estimator of the price at time t=0 (for now) using Monte Carlo, with the stochastic integral as a control variate
Parameters
----------
        ts: torch.Tensor
timegrid. Vector of length N
x0: torch.Tensor
initial value of SDE. Tensor of shape (1, d)
option: object of class option to calculate payoff
MC_samples: int
Monte Carlo samples
"""
assert x0.shape[0] == 1, "we need just 1 sample"
x0 = x0.repeat(MC_samples, 1)
with torch.no_grad():
x, brownian_increments = self.sdeint(ts, x0)
payoff = option.payoff(x[:,-1,:]) # (batch_size, 1)
device = x.device
batch_size = x.shape[0]
t = ts.reshape(1,-1,1).repeat(batch_size,1,1)
tx = torch.cat([t,x],2) # (batch_size, L, dim+1)
Z = []
for idt, t in enumerate(ts[:-1]):
tt = torch.ones(batch_size, 1, device=device)*t
tt.requires_grad_(True)
xx = x[:,idt,:].requires_grad_(True)
Y = self.net_dgm(tt, xx)
Z.append(get_gradient(Y,xx))
Z = torch.stack(Z, 1)
stoch_int = 0
for idx,t in enumerate(ts[:-1]):
discount_factor = torch.exp(-self.mu*t)
stoch_int += discount_factor * torch.sum(Z[:,idx,:]*brownian_increments[:,idx,:], 1, keepdim=True)
mc = torch.exp(-self.mu*ts[-1])*payoff
cv = stoch_int
cv_mult = torch.mean((mc-mc.mean())*(cv-cv.mean())) / cv.var() # optimal multiplier. cf. Belomestny book
return mc, mc - cv_mult*stoch_int # stoch_int has expected value 0, thus it doesn't add any bias to the MC estimator, and it is correlated with payoff
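# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Assuming this module's own imports (torch, tqdm, lib.options) resolve, the block below
# shows how Net_DGM, get_gradient and get_laplacian fit together. The shapes used here
# (a batch of 8 points in spatial dimension 2, hidden width 16) are arbitrary choices
# made only for the example.
if __name__ == "__main__":
    torch.manual_seed(0)
    net = Net_DGM(dim_x=2, dim_S=16)
    t = torch.rand(8, 1, requires_grad=True)  # batch of times
    x = torch.rand(8, 2, requires_grad=True)  # batch of spatial points
    u = net(t, x)                             # u(t, x), shape (8, 1)
    grad_u_x = get_gradient(u, x)             # spatial gradient, shape (8, 2)
    lap_u = get_laplacian(grad_u_x, x)        # Laplacian of u w.r.t. x, shape (8, 1)
    print(u.shape, grad_u_x.shape, lap_u.shape)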
| 38.86036
| 196
| 0.588849
|
858121d9f4460e5291a0092474472dd95a4c8edf
| 61,299
|
py
|
Python
|
moto/events/models.py
|
number09/moto
|
9e61ab22207454f69a84a7a9afaeef9eebafec43
|
[
"Apache-2.0"
] | null | null | null |
moto/events/models.py
|
number09/moto
|
9e61ab22207454f69a84a7a9afaeef9eebafec43
|
[
"Apache-2.0"
] | null | null | null |
moto/events/models.py
|
number09/moto
|
9e61ab22207454f69a84a7a9afaeef9eebafec43
|
[
"Apache-2.0"
] | null | null | null |
import base64
import copy
import os
import re
import json
import sys
import warnings
from collections import namedtuple
from datetime import datetime
from enum import Enum, unique
from json import JSONDecodeError
from operator import lt, le, eq, ge, gt
from boto3 import Session
from moto.core.exceptions import JsonRESTError
from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel, BaseModel
from moto.core.utils import unix_time, iso_8601_datetime_without_milliseconds
from moto.events.exceptions import (
ValidationException,
ResourceNotFoundException,
ResourceAlreadyExistsException,
InvalidEventPatternException,
IllegalStatusException,
)
from moto.utilities.tagging_service import TaggingService
from uuid import uuid4
class Rule(CloudFormationModel):
Arn = namedtuple("Arn", ["service", "resource_type", "resource_id"])
def __init__(
self,
name,
region_name,
description,
event_pattern,
schedule_exp,
role_arn,
event_bus_name,
state,
managed_by=None,
targets=None,
):
self.name = name
self.region_name = region_name
self.description = description
self.event_pattern = EventPattern.load(event_pattern)
self.scheduled_expression = schedule_exp
self.role_arn = role_arn
self.event_bus_name = event_bus_name
self.state = state or "ENABLED"
self.managed_by = managed_by # can only be set by AWS services
self.created_by = ACCOUNT_ID
self.targets = targets or []
@property
def arn(self):
event_bus_name = (
""
if self.event_bus_name == "default"
else "{}/".format(self.event_bus_name)
)
return "arn:aws:events:{region}:{account_id}:rule/{event_bus_name}{name}".format(
region=self.region_name,
account_id=ACCOUNT_ID,
event_bus_name=event_bus_name,
name=self.name,
)
@property
def physical_resource_id(self):
return self.name
# This song and dance for targets is because we need order for Limits and NextTokens, but can't use OrderedDicts
# with Python 2.6, so tracking it with an array it is.
def _check_target_exists(self, target_id):
for i in range(0, len(self.targets)):
if target_id == self.targets[i]["Id"]:
return i
return None
def enable(self):
self.state = "ENABLED"
def disable(self):
self.state = "DISABLED"
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.delete_rule(name=self.name)
def put_targets(self, targets):
# Not testing for valid ARNs.
for target in targets:
index = self._check_target_exists(target["Id"])
if index is not None:
self.targets[index] = target
else:
self.targets.append(target)
def remove_targets(self, ids):
for target_id in ids:
index = self._check_target_exists(target_id)
if index is not None:
self.targets.pop(index)
def send_to_targets(self, event_bus_name, event):
event_bus_name = event_bus_name.split("/")[-1]
if event_bus_name != self.event_bus_name.split("/")[-1]:
return
if not self.event_pattern.matches_event(event):
return
# supported targets
# - CloudWatch Log Group
# - EventBridge Archive
# - SQS Queue + FIFO Queue
for target in self.targets:
arn = self._parse_arn(target["Arn"])
if arn.service == "logs" and arn.resource_type == "log-group":
self._send_to_cw_log_group(arn.resource_id, event)
elif arn.service == "events" and not arn.resource_type:
input_template = json.loads(target["InputTransformer"]["InputTemplate"])
archive_arn = self._parse_arn(input_template["archive-arn"])
self._send_to_events_archive(archive_arn.resource_id, event)
elif arn.service == "sqs":
group_id = target.get("SqsParameters", {}).get("MessageGroupId")
self._send_to_sqs_queue(arn.resource_id, event, group_id)
else:
raise NotImplementedError("Expr not defined for {0}".format(type(self)))
def _parse_arn(self, arn):
# http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
        # this method probably needs some more fine-tuning
        # when other targets are supported as well
elements = arn.split(":", 5)
service = elements[2]
resource = elements[5]
if ":" in resource and "/" in resource:
if resource.index(":") < resource.index("/"):
resource_type, resource_id = resource.split(":", 1)
else:
resource_type, resource_id = resource.split("/", 1)
elif ":" in resource:
resource_type, resource_id = resource.split(":", 1)
elif "/" in resource:
resource_type, resource_id = resource.split("/", 1)
else:
resource_type = None
resource_id = resource
return self.Arn(
service=service, resource_type=resource_type, resource_id=resource_id
)
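    # Illustrative example (not from the original source): an ARN such as
    # "arn:aws:logs:us-east-1:123456789012:log-group:/my/group" parses to
    # service="logs", resource_type="log-group", resource_id="/my/group".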
def _send_to_cw_log_group(self, name, event):
from moto.logs import logs_backends
event_copy = copy.deepcopy(event)
event_copy["time"] = iso_8601_datetime_without_milliseconds(
datetime.utcfromtimestamp(event_copy["time"])
)
log_stream_name = str(uuid4())
log_events = [
{
"timestamp": unix_time(datetime.utcnow()),
"message": json.dumps(event_copy),
}
]
logs_backends[self.region_name].create_log_stream(name, log_stream_name)
logs_backends[self.region_name].put_log_events(
name, log_stream_name, log_events, None
)
def _send_to_events_archive(self, resource_id, event):
archive_name, archive_uuid = resource_id.split(":")
archive = events_backends[self.region_name].archives.get(archive_name)
if archive.uuid == archive_uuid:
archive.events.append(event)
def _send_to_sqs_queue(self, resource_id, event, group_id=None):
from moto.sqs import sqs_backends
event_copy = copy.deepcopy(event)
event_copy["time"] = iso_8601_datetime_without_milliseconds(
datetime.utcfromtimestamp(event_copy["time"])
)
if group_id:
queue_attr = sqs_backends[self.region_name].get_queue_attributes(
queue_name=resource_id, attribute_names=["ContentBasedDeduplication"]
)
if queue_attr["ContentBasedDeduplication"] == "false":
warnings.warn(
"To let EventBridge send messages to your SQS FIFO queue, "
"you must enable content-based deduplication."
)
return
sqs_backends[self.region_name].send_message(
queue_name=resource_id,
message_body=json.dumps(event_copy),
group_id=group_id,
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "Name"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html
return "AWS::Events::Rule"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
properties.setdefault("EventBusName", "default")
event_name = resource_name
event_pattern = properties.get("EventPattern")
scheduled_expression = properties.get("ScheduleExpression")
state = properties.get("State")
desc = properties.get("Description")
role_arn = properties.get("RoleArn")
event_bus_name = properties.get("EventBusName")
tags = properties.get("Tags")
backend = events_backends[region_name]
return backend.put_rule(
event_name,
scheduled_expression=scheduled_expression,
event_pattern=event_pattern,
state=state,
description=desc,
role_arn=role_arn,
event_bus_name=event_bus_name,
tags=tags,
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
event_backend = events_backends[region_name]
event_name = resource_name
event_backend.delete_rule(name=event_name)
def describe(self):
attributes = {
"Arn": self.arn,
"CreatedBy": self.created_by,
"Description": self.description,
"EventBusName": self.event_bus_name,
"EventPattern": self.event_pattern.dump(),
"ManagedBy": self.managed_by,
"Name": self.name,
"RoleArn": self.role_arn,
"ScheduleExpression": self.scheduled_expression,
"State": self.state,
}
attributes = {
attr: value for attr, value in attributes.items() if value is not None
}
return attributes
class EventBus(CloudFormationModel):
def __init__(self, region_name, name, tags=None):
self.region = region_name
self.name = name
self.tags = tags or []
self._statements = {}
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:event-bus/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
@property
def policy(self):
if self._statements:
policy = {
"Version": "2012-10-17",
"Statement": [stmt.describe() for stmt in self._statements.values()],
}
return json.dumps(policy)
return None
def has_permissions(self):
return len(self._statements) > 0
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.delete_event_bus(name=self.name)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "Arn":
return self.arn
elif attribute_name == "Name":
return self.name
elif attribute_name == "Policy":
return self.policy
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "Name"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-eventbus.html
return "AWS::Events::EventBus"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
event_name = resource_name
event_source_name = properties.get("EventSourceName")
return event_backend.create_event_bus(
name=event_name, event_source_name=event_source_name
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
event_backend = events_backends[region_name]
event_bus_name = resource_name
event_backend.delete_event_bus(event_bus_name)
def _remove_principals_statements(self, *principals):
statements_to_delete = set()
for principal in principals:
for sid, statement in self._statements.items():
if statement.principal == principal:
statements_to_delete.add(sid)
# This is done separately to avoid:
# RuntimeError: dictionary changed size during iteration
for sid in statements_to_delete:
del self._statements[sid]
def add_permission(self, statement_id, action, principal, condition):
self._remove_principals_statements(principal)
statement = EventBusPolicyStatement(
sid=statement_id,
action=action,
principal=principal,
condition=condition,
resource=self.arn,
)
self._statements[statement_id] = statement
def add_policy(self, policy):
policy_statements = policy["Statement"]
principals = [stmt["Principal"] for stmt in policy_statements]
self._remove_principals_statements(*principals)
for new_statement in policy_statements:
sid = new_statement["Sid"]
self._statements[sid] = EventBusPolicyStatement.from_dict(new_statement)
def remove_statement(self, sid):
return self._statements.pop(sid, None)
def remove_statements(self):
self._statements.clear()
class EventBusPolicyStatement:
def __init__(
self, sid, principal, action, resource, effect="Allow", condition=None
):
self.sid = sid
self.principal = principal
self.action = action
self.resource = resource
self.effect = effect
self.condition = condition
def describe(self):
statement = dict(
Sid=self.sid,
Effect=self.effect,
Principal=self.principal,
Action=self.action,
Resource=self.resource,
)
if self.condition:
statement["Condition"] = self.condition
return statement
@classmethod
def from_dict(cls, statement_dict):
params = dict(
sid=statement_dict["Sid"],
effect=statement_dict["Effect"],
principal=statement_dict["Principal"],
action=statement_dict["Action"],
resource=statement_dict["Resource"],
)
condition = statement_dict.get("Condition")
if condition:
params["condition"] = condition
return cls(**params)
class Archive(CloudFormationModel):
# https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListArchives.html#API_ListArchives_RequestParameters
VALID_STATES = [
"ENABLED",
"DISABLED",
"CREATING",
"UPDATING",
"CREATE_FAILED",
"UPDATE_FAILED",
]
def __init__(
self, region_name, name, source_arn, description, event_pattern, retention
):
self.region = region_name
self.name = name
self.source_arn = source_arn
self.description = description
self.event_pattern = EventPattern.load(event_pattern)
self.retention = retention if retention else 0
self.creation_time = unix_time(datetime.utcnow())
self.state = "ENABLED"
self.uuid = str(uuid4())
self.events = []
self.event_bus_name = source_arn.split("/")[-1]
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:archive/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
def describe_short(self):
return {
"ArchiveName": self.name,
"EventSourceArn": self.source_arn,
"State": self.state,
"RetentionDays": self.retention,
"SizeBytes": sys.getsizeof(self.events) if len(self.events) > 0 else 0,
"EventCount": len(self.events),
"CreationTime": self.creation_time,
}
def describe(self):
result = {
"ArchiveArn": self.arn,
"Description": self.description,
"EventPattern": self.event_pattern.dump(),
}
result.update(self.describe_short())
return result
def update(self, description, event_pattern, retention):
if description:
self.description = description
if event_pattern:
self.event_pattern = EventPattern.load(event_pattern)
if retention:
self.retention = retention
def delete(self, region_name):
event_backend = events_backends[region_name]
event_backend.archives.pop(self.name)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "ArchiveName":
return self.name
elif attribute_name == "Arn":
return self.arn
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return "ArchiveName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-archive.html
return "AWS::Events::Archive"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
properties = cloudformation_json["Properties"]
event_backend = events_backends[region_name]
source_arn = properties.get("SourceArn")
description = properties.get("Description")
event_pattern = properties.get("EventPattern")
retention = properties.get("RetentionDays")
return event_backend.create_archive(
resource_name, source_arn, description, event_pattern, retention
)
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name
):
if new_resource_name == original_resource.name:
properties = cloudformation_json["Properties"]
original_resource.update(
properties.get("Description"),
properties.get("EventPattern"),
properties.get("Retention"),
)
return original_resource
else:
original_resource.delete(region_name)
return cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
event_backend = events_backends[region_name]
event_backend.delete_archive(resource_name)
@unique
class ReplayState(Enum):
# https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_ListReplays.html#API_ListReplays_RequestParameters
STARTING = "STARTING"
RUNNING = "RUNNING"
CANCELLING = "CANCELLING"
COMPLETED = "COMPLETED"
CANCELLED = "CANCELLED"
FAILED = "FAILED"
class Replay(BaseModel):
def __init__(
self,
region_name,
name,
description,
source_arn,
start_time,
end_time,
destination,
):
self.region = region_name
self.name = name
self.description = description
self.source_arn = source_arn
self.event_start_time = start_time
self.event_end_time = end_time
self.destination = destination
self.state = ReplayState.STARTING
self.start_time = unix_time(datetime.utcnow())
self.end_time = None
@property
def arn(self):
return "arn:aws:events:{region}:{account_id}:replay/{name}".format(
region=self.region, account_id=ACCOUNT_ID, name=self.name
)
def describe_short(self):
return {
"ReplayName": self.name,
"EventSourceArn": self.source_arn,
"State": self.state.value,
"EventStartTime": self.event_start_time,
"EventEndTime": self.event_end_time,
"ReplayStartTime": self.start_time,
"ReplayEndTime": self.end_time,
}
def describe(self):
result = {
"ReplayArn": self.arn,
"Description": self.description,
"Destination": self.destination,
}
result.update(self.describe_short())
return result
def replay_events(self, archive):
event_bus_name = self.destination["Arn"].split("/")[-1]
for event in archive.events:
for rule in events_backends[self.region].rules.values():
rule.send_to_targets(
event_bus_name,
dict(event, **{"id": str(uuid4()), "replay-name": self.name}),
)
self.state = ReplayState.COMPLETED
self.end_time = unix_time(datetime.utcnow())
class Connection(BaseModel):
def __init__(
self, name, region_name, description, authorization_type, auth_parameters,
):
self.uuid = uuid4()
self.name = name
self.region = region_name
self.description = description
self.authorization_type = authorization_type
self.auth_parameters = auth_parameters
self.creation_time = unix_time(datetime.utcnow())
self.state = "AUTHORIZED"
@property
def arn(self):
return "arn:aws:events:{0}:{1}:connection/{2}/{3}".format(
self.region, ACCOUNT_ID, self.name, self.uuid
)
def describe_short(self):
"""
Create the short description for the Connection object.
        Taken from the Response Syntax of this API doc:
- https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteConnection.html
Something to consider:
- The original response also has
- LastAuthorizedTime (number)
- LastModifiedTime (number)
        - At the time of implementing this, there was no place to set/get
          those attributes. That is why they are not in the response.
Returns:
dict
"""
return {
"ConnectionArn": self.arn,
"ConnectionState": self.state,
"CreationTime": self.creation_time,
}
def describe(self):
"""
Create a complete description for the Connection object.
        Taken from the Response Syntax of this API doc:
- https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeConnection.html
Something to consider:
- The original response also has:
- LastAuthorizedTime (number)
- LastModifiedTime (number)
- SecretArn (string)
- StateReason (string)
        - At the time of implementing this, there was no place to set/get
          those attributes. That is why they are not in the response.
Returns:
dict
"""
return {
"AuthorizationType": self.authorization_type,
"AuthParameters": self.auth_parameters,
"ConnectionArn": self.arn,
"ConnectionState": self.state,
"CreationTime": self.creation_time,
"Description": self.description,
"Name": self.name,
}
class Destination(BaseModel):
def __init__(
self,
name,
region_name,
description,
connection_arn,
invocation_endpoint,
invocation_rate_limit_per_second,
http_method,
):
self.uuid = uuid4()
self.name = name
self.region = region_name
self.description = description
self.connection_arn = connection_arn
self.invocation_endpoint = invocation_endpoint
self.invocation_rate_limit_per_second = invocation_rate_limit_per_second
self.creation_time = unix_time(datetime.utcnow())
self.http_method = http_method
self.state = "ACTIVE"
@property
def arn(self):
return "arn:aws:events:{0}:{1}:api-destination/{2}/{3}".format(
self.region, ACCOUNT_ID, self.name, self.uuid
)
def describe(self):
"""
Describes the Destination object as a dict
Docs:
Response Syntax in
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeApiDestination.html
Something to consider:
- The response also has [InvocationRateLimitPerSecond] which was not
available when implementing this method
Returns:
dict
"""
return {
"ApiDestinationArn": self.arn,
"ApiDestinationState": self.state,
"ConnectionArn": self.connection_arn,
"CreationTime": self.creation_time,
"Description": self.description,
"HttpMethod": self.http_method,
"InvocationEndpoint": self.invocation_endpoint,
"InvocationRateLimitPerSecond": self.invocation_rate_limit_per_second,
"LastModifiedTime": self.creation_time,
"Name": self.name,
}
def describe_short(self):
return {
"ApiDestinationArn": self.arn,
"ApiDestinationState": self.state,
"CreationTime": self.creation_time,
"LastModifiedTime": self.creation_time,
}
class EventPattern:
def __init__(self, raw_pattern, pattern):
self._raw_pattern = raw_pattern
self._pattern = pattern
def matches_event(self, event):
if not self._pattern:
return True
event = json.loads(json.dumps(event))
return self._does_event_match(event, self._pattern)
def _does_event_match(self, event, pattern):
items_and_filters = [(event.get(k), v) for k, v in pattern.items()]
nested_filter_matches = [
self._does_event_match(item, nested_filter)
for item, nested_filter in items_and_filters
if isinstance(nested_filter, dict)
]
filter_list_matches = [
self._does_item_match_filters(item, filter_list)
for item, filter_list in items_and_filters
if isinstance(filter_list, list)
]
return all(nested_filter_matches + filter_list_matches)
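    # Illustrative example (assumption, not in the original source): the parsed pattern
    # {"source": ["aws.ec2"], "detail": {"state": ["running"]}} matches an event whose
    # "source" is "aws.ec2" and whose nested "detail"."state" is "running".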
def _does_item_match_filters(self, item, filters):
allowed_values = [value for value in filters if isinstance(value, str)]
allowed_values_match = item in allowed_values if allowed_values else True
named_filter_matches = [
self._does_item_match_named_filter(item, pattern)
for pattern in filters
if isinstance(pattern, dict)
]
return allowed_values_match and all(named_filter_matches)
@staticmethod
def _does_item_match_named_filter(item, pattern):
filter_name, filter_value = list(pattern.items())[0]
if filter_name == "exists":
is_leaf_node = not isinstance(item, dict)
leaf_exists = is_leaf_node and item is not None
should_exist = filter_value
return leaf_exists if should_exist else not leaf_exists
if filter_name == "prefix":
prefix = filter_value
return item.startswith(prefix)
if filter_name == "numeric":
as_function = {"<": lt, "<=": le, "=": eq, ">=": ge, ">": gt}
operators_and_values = zip(filter_value[::2], filter_value[1::2])
numeric_matches = [
as_function[operator](item, value)
for operator, value in operators_and_values
]
return all(numeric_matches)
else:
warnings.warn(
"'{}' filter logic unimplemented. defaulting to True".format(
filter_name
)
)
return True
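    # Illustrative example (assumption, not in the original source): the named filter
    # {"numeric": [">", 0, "<=", 5]} is read pairwise as [(">", 0), ("<=", 5)], so an
    # item of 3 matches while an item of 7 does not.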
@classmethod
def load(cls, raw_pattern):
parser = EventPatternParser(raw_pattern)
pattern = parser.parse()
return cls(raw_pattern, pattern)
def dump(self):
return self._raw_pattern
class EventPatternParser:
def __init__(self, pattern):
self.pattern = pattern
def _validate_event_pattern(self, pattern):
# values in the event pattern have to be either a dict or an array
for attr, value in pattern.items():
if isinstance(value, dict):
self._validate_event_pattern(value)
elif isinstance(value, list):
if len(value) == 0:
raise InvalidEventPatternException(
reason="Empty arrays are not allowed"
)
else:
raise InvalidEventPatternException(
reason=f"'{attr}' must be an object or an array"
)
def parse(self):
try:
parsed_pattern = json.loads(self.pattern) if self.pattern else dict()
self._validate_event_pattern(parsed_pattern)
return parsed_pattern
except JSONDecodeError:
raise InvalidEventPatternException(reason="Invalid JSON")
class EventsBackend(BaseBackend):
ACCOUNT_ID = re.compile(r"^(\d{1,12}|\*)$")
STATEMENT_ID = re.compile(r"^[a-zA-Z0-9-_]{1,64}$")
_CRON_REGEX = re.compile(r"^cron\(.*\)")
_RATE_REGEX = re.compile(r"^rate\(\d*\s(minute|minutes|hour|hours|day|days)\)")
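    # Illustrative examples (assumptions, not from the original source): schedule
    # expressions such as "cron(0 12 * * ? *)" and "rate(5 minutes)" satisfy these
    # patterns; anything else is rejected by put_rule below.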
def __init__(self, region_name):
self.rules = {}
# This array tracks the order in which the rules have been added, since
# 2.6 doesn't have OrderedDicts.
self.rules_order = []
self.next_tokens = {}
self.region_name = region_name
self.event_buses = {}
self.event_sources = {}
self.archives = {}
self.replays = {}
self.tagger = TaggingService()
self._add_default_event_bus()
self.connections = {}
self.destinations = {}
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
def _add_default_event_bus(self):
self.event_buses["default"] = EventBus(self.region_name, "default")
def _get_rule_by_index(self, i):
return self.rules.get(self.rules_order[i])
    def _gen_next_token(self, index):
        # os.urandom(...).encode("base64") only works on Python 2; build the token explicitly
        token = base64.b64encode(os.urandom(128)).decode("utf-8")
        self.next_tokens[token] = index
        return token
def _process_token_and_limits(self, array_len, next_token=None, limit=None):
start_index = 0
end_index = array_len
new_next_token = None
if next_token:
start_index = self.next_tokens.pop(next_token, 0)
if limit is not None:
new_end_index = start_index + int(limit)
if new_end_index < end_index:
end_index = new_end_index
new_next_token = self._gen_next_token(end_index)
return start_index, end_index, new_next_token
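    # Worked example (illustrative only): with array_len=5, limit=2 and no token, this
    # returns (0, 2, token_a); passing token_a back on the next call returns (2, 4, token_b).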
def _get_event_bus(self, name):
event_bus_name = name.split("/")[-1]
event_bus = self.event_buses.get(event_bus_name)
if not event_bus:
raise ResourceNotFoundException(
"Event bus {} does not exist.".format(event_bus_name)
)
return event_bus
def _get_replay(self, name):
replay = self.replays.get(name)
if not replay:
raise ResourceNotFoundException("Replay {} does not exist.".format(name))
return replay
def put_rule(
self,
name,
*,
description=None,
event_bus_name=None,
event_pattern=None,
role_arn=None,
scheduled_expression=None,
state=None,
managed_by=None,
tags=None,
):
event_bus_name = event_bus_name or "default"
if not event_pattern and not scheduled_expression:
raise JsonRESTError(
"ValidationException",
"Parameter(s) EventPattern or ScheduleExpression must be specified.",
)
if scheduled_expression:
if event_bus_name != "default":
raise ValidationException(
"ScheduleExpression is supported only on the default event bus."
)
if not (
self._CRON_REGEX.match(scheduled_expression)
or self._RATE_REGEX.match(scheduled_expression)
):
raise ValidationException("Parameter ScheduleExpression is not valid.")
existing_rule = self.rules.get(name)
targets = existing_rule.targets if existing_rule else list()
rule = Rule(
name,
self.region_name,
description,
event_pattern,
scheduled_expression,
role_arn,
event_bus_name,
state,
managed_by,
targets=targets,
)
self.rules[name] = rule
self.rules_order.append(name)
if tags:
self.tagger.tag_resource(rule.arn, tags)
return rule
def delete_rule(self, name):
self.rules_order.pop(self.rules_order.index(name))
arn = self.rules.get(name).arn
if self.tagger.has_tags(arn):
self.tagger.delete_all_tags_for_resource(arn)
return self.rules.pop(name) is not None
def describe_rule(self, name):
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException("Rule {} does not exist.".format(name))
return rule
def disable_rule(self, name):
if name in self.rules:
self.rules[name].disable()
return True
return False
def enable_rule(self, name):
if name in self.rules:
self.rules[name].enable()
return True
return False
def list_rule_names_by_target(self, target_arn, next_token=None, limit=None):
matching_rules = []
return_obj = {}
start_index, end_index, new_next_token = self._process_token_and_limits(
len(self.rules), next_token, limit
)
for i in range(start_index, end_index):
rule = self._get_rule_by_index(i)
for target in rule.targets:
if target["Arn"] == target_arn:
matching_rules.append(rule.name)
return_obj["RuleNames"] = matching_rules
if new_next_token is not None:
return_obj["NextToken"] = new_next_token
return return_obj
def list_rules(self, prefix=None, next_token=None, limit=None):
match_string = ".*"
if prefix is not None:
match_string = "^" + prefix + match_string
match_regex = re.compile(match_string)
matching_rules = []
return_obj = {}
start_index, end_index, new_next_token = self._process_token_and_limits(
len(self.rules), next_token, limit
)
for i in range(start_index, end_index):
rule = self._get_rule_by_index(i)
if match_regex.match(rule.name):
matching_rules.append(rule)
return_obj["Rules"] = matching_rules
if new_next_token is not None:
return_obj["NextToken"] = new_next_token
return return_obj
def list_targets_by_rule(self, rule, next_token=None, limit=None):
        # We'll let a KeyError exception be thrown for the response layer to handle
        # if the rule doesn't exist.
rule = self.rules[rule]
start_index, end_index, new_next_token = self._process_token_and_limits(
len(rule.targets), next_token, limit
)
returned_targets = []
return_obj = {}
for i in range(start_index, end_index):
returned_targets.append(rule.targets[i])
return_obj["Targets"] = returned_targets
if new_next_token is not None:
return_obj["NextToken"] = new_next_token
return return_obj
def put_targets(self, name, event_bus_name, targets):
# super simple ARN check
invalid_arn = next(
(
target["Arn"]
for target in targets
if not re.match(r"arn:[\d\w:\-/]*", target["Arn"])
),
None,
)
if invalid_arn:
raise ValidationException(
"Parameter {} is not valid. "
"Reason: Provided Arn is not in correct format.".format(invalid_arn)
)
for target in targets:
arn = target["Arn"]
if (
":sqs:" in arn
and arn.endswith(".fifo")
and not target.get("SqsParameters")
):
raise ValidationException(
"Parameter(s) SqsParameters must be specified for target: {}.".format(
target["Id"]
)
)
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus {1}.".format(name, event_bus_name)
)
rule.put_targets(targets)
def put_events(self, events):
num_events = len(events)
if num_events > 10:
# the exact error text is longer, the Value list consists of all the put events
raise ValidationException(
"1 validation error detected: "
"Value '[PutEventsRequestEntry]' at 'entries' failed to satisfy constraint: "
"Member must have length less than or equal to 10"
)
entries = []
for event in events:
if "Source" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter Source is not valid. Reason: Source is a required argument.",
}
)
elif "DetailType" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter DetailType is not valid. Reason: DetailType is a required argument.",
}
)
elif "Detail" not in event:
entries.append(
{
"ErrorCode": "InvalidArgument",
"ErrorMessage": "Parameter Detail is not valid. Reason: Detail is a required argument.",
}
)
else:
try:
json.loads(event["Detail"])
except ValueError: # json.JSONDecodeError exists since Python 3.5
entries.append(
{
"ErrorCode": "MalformedDetail",
"ErrorMessage": "Detail is malformed.",
}
)
continue
event_id = str(uuid4())
entries.append({"EventId": event_id})
                # if 'EventBusName' is not explicitly set, the event is sent to the default bus
event_bus_name = event.get("EventBusName", "default")
for rule in self.rules.values():
rule.send_to_targets(
event_bus_name,
{
"version": "0",
"id": event_id,
"detail-type": event["DetailType"],
"source": event["Source"],
"account": ACCOUNT_ID,
"time": event.get("Time", unix_time(datetime.utcnow())),
"region": self.region_name,
"resources": event.get("Resources", []),
"detail": json.loads(event["Detail"]),
},
)
return entries
def remove_targets(self, name, event_bus_name, ids):
rule = self.rules.get(name)
if not rule:
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus {1}.".format(name, event_bus_name)
)
rule.remove_targets(ids)
def test_event_pattern(self):
raise NotImplementedError()
@staticmethod
def _put_permission_from_policy(event_bus, policy):
try:
policy_doc = json.loads(policy)
event_bus.add_policy(policy_doc)
except JSONDecodeError:
raise JsonRESTError(
"ValidationException", "This policy contains invalid Json"
)
@staticmethod
def _condition_param_to_stmt_condition(condition):
if condition:
key = condition["Key"]
value = condition["Value"]
condition_type = condition["Type"]
return {condition_type: {key: value}}
return None
def _put_permission_from_params(
self, event_bus, action, principal, statement_id, condition
):
if principal is None:
raise JsonRESTError(
"ValidationException", "Parameter Principal must be specified."
)
if condition and principal != "*":
raise JsonRESTError(
"InvalidParameterValue",
"Value of the parameter 'principal' must be '*' when the parameter 'condition' is set.",
)
if not condition and self.ACCOUNT_ID.match(principal) is None:
raise JsonRESTError(
"InvalidParameterValue",
f"Value {principal} at 'principal' failed to satisfy constraint: "
r"Member must satisfy regular expression pattern: (\d{12}|\*)",
)
if action is None or action != "events:PutEvents":
raise JsonRESTError(
"ValidationException",
"Provided value in parameter 'action' is not supported.",
)
if statement_id is None or self.STATEMENT_ID.match(statement_id) is None:
raise JsonRESTError(
"InvalidParameterValue", r"StatementId must match ^[a-zA-Z0-9-_]{1,64}$"
)
principal = {"AWS": f"arn:aws:iam::{principal}:root"}
stmt_condition = self._condition_param_to_stmt_condition(condition)
event_bus.add_permission(statement_id, action, principal, stmt_condition)
def put_permission(
self, event_bus_name, action, principal, statement_id, condition, policy
):
if not event_bus_name:
event_bus_name = "default"
event_bus = self.describe_event_bus(event_bus_name)
if policy:
self._put_permission_from_policy(event_bus, policy)
else:
self._put_permission_from_params(
event_bus, action, principal, statement_id, condition
)
def remove_permission(self, event_bus_name, statement_id, remove_all_permissions):
if not event_bus_name:
event_bus_name = "default"
event_bus = self.describe_event_bus(event_bus_name)
if remove_all_permissions:
event_bus.remove_statements()
else:
if not event_bus.has_permissions():
raise JsonRESTError(
"ResourceNotFoundException", "EventBus does not have a policy."
)
statement = event_bus.remove_statement(statement_id)
if not statement:
raise JsonRESTError(
"ResourceNotFoundException",
"Statement with the provided id does not exist.",
)
def describe_event_bus(self, name):
if not name:
name = "default"
event_bus = self._get_event_bus(name)
return event_bus
def create_event_bus(self, name, event_source_name=None, tags=None):
if name in self.event_buses:
raise JsonRESTError(
"ResourceAlreadyExistsException",
"Event bus {} already exists.".format(name),
)
if not event_source_name and "/" in name:
raise JsonRESTError(
"ValidationException", "Event bus name must not contain '/'."
)
if event_source_name and event_source_name not in self.event_sources:
raise JsonRESTError(
"ResourceNotFoundException",
"Event source {} does not exist.".format(event_source_name),
)
event_bus = EventBus(self.region_name, name, tags=tags)
self.event_buses[name] = event_bus
if tags:
self.tagger.tag_resource(event_bus.arn, tags)
return self.event_buses[name]
def list_event_buses(self, name_prefix):
if name_prefix:
return [
event_bus
for event_bus in self.event_buses.values()
if event_bus.name.startswith(name_prefix)
]
return list(self.event_buses.values())
def delete_event_bus(self, name):
if name == "default":
raise JsonRESTError(
"ValidationException", "Cannot delete event bus default."
)
event_bus = self.event_buses.pop(name, None)
if event_bus:
self.tagger.delete_all_tags_for_resource(event_bus.arn)
def list_tags_for_resource(self, arn):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
return self.tagger.list_tags_for_resource(registry[name].arn)
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def tag_resource(self, arn, tags):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
self.tagger.tag_resource(registry[name].arn, tags)
return {}
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def untag_resource(self, arn, tag_names):
name = arn.split("/")[-1]
registries = [self.rules, self.event_buses]
for registry in registries:
if name in registry:
self.tagger.untag_resource_using_names(registry[name].arn, tag_names)
return {}
raise ResourceNotFoundException(
"Rule {0} does not exist on EventBus default.".format(name)
)
def create_archive(self, name, source_arn, description, event_pattern, retention):
if len(name) > 48:
raise ValidationException(
" 1 validation error detected: "
"Value '{}' at 'archiveName' failed to satisfy constraint: "
"Member must have length less than or equal to 48".format(name)
)
event_bus = self._get_event_bus(source_arn)
if name in self.archives:
raise ResourceAlreadyExistsException(
"Archive {} already exists.".format(name)
)
archive = Archive(
self.region_name, name, source_arn, description, event_pattern, retention
)
rule_event_pattern = json.loads(event_pattern or "{}")
rule_event_pattern["replay-name"] = [{"exists": False}]
rule_name = "Events-Archive-{}".format(name)
rule = self.put_rule(
rule_name,
event_pattern=json.dumps(rule_event_pattern),
event_bus_name=event_bus.name,
managed_by="prod.vhs.events.aws.internal",
)
self.put_targets(
rule.name,
rule.event_bus_name,
[
{
"Id": rule.name,
"Arn": "arn:aws:events:{}:::".format(self.region_name),
"InputTransformer": {
"InputPathsMap": {},
"InputTemplate": json.dumps(
{
"archive-arn": "{0}:{1}".format(
archive.arn, archive.uuid
),
"event": "<aws.events.event.json>",
"ingestion-time": "<aws.events.event.ingestion-time>",
}
),
},
}
],
)
self.archives[name] = archive
return archive
def describe_archive(self, name):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
return archive.describe()
def list_archives(self, name_prefix, source_arn, state):
if [name_prefix, source_arn, state].count(None) < 2:
raise ValidationException(
"At most one filter is allowed for ListArchives. "
"Use either : State, EventSourceArn, or NamePrefix."
)
if state and state not in Archive.VALID_STATES:
raise ValidationException(
"1 validation error detected: "
"Value '{0}' at 'state' failed to satisfy constraint: "
"Member must satisfy enum value set: "
"[{1}]".format(state, ", ".join(Archive.VALID_STATES))
)
if [name_prefix, source_arn, state].count(None) == 3:
return [archive.describe_short() for archive in self.archives.values()]
result = []
for archive in self.archives.values():
if name_prefix and archive.name.startswith(name_prefix):
result.append(archive.describe_short())
elif source_arn and archive.source_arn == source_arn:
result.append(archive.describe_short())
elif state and archive.state == state:
result.append(archive.describe_short())
return result
def update_archive(self, name, description, event_pattern, retention):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
archive.update(description, event_pattern, retention)
return {
"ArchiveArn": archive.arn,
"CreationTime": archive.creation_time,
"State": archive.state,
}
def delete_archive(self, name):
archive = self.archives.get(name)
if not archive:
raise ResourceNotFoundException("Archive {} does not exist.".format(name))
archive.delete(self.region_name)
def start_replay(
self, name, description, source_arn, start_time, end_time, destination
):
event_bus_arn = destination["Arn"]
event_bus_arn_pattern = r"^arn:aws:events:[a-zA-Z0-9-]+:\d{12}:event-bus/"
if not re.match(event_bus_arn_pattern, event_bus_arn):
raise ValidationException(
"Parameter Destination.Arn is not valid. "
"Reason: Must contain an event bus ARN."
)
self._get_event_bus(event_bus_arn)
archive_name = source_arn.split("/")[-1]
archive = self.archives.get(archive_name)
if not archive:
raise ValidationException(
"Parameter EventSourceArn is not valid. "
"Reason: Archive {} does not exist.".format(archive_name)
)
if event_bus_arn != archive.source_arn:
raise ValidationException(
"Parameter Destination.Arn is not valid. "
"Reason: Cross event bus replay is not permitted."
)
if start_time > end_time:
raise ValidationException(
"Parameter EventEndTime is not valid. "
"Reason: EventStartTime must be before EventEndTime."
)
if name in self.replays:
raise ResourceAlreadyExistsException(
"Replay {} already exists.".format(name)
)
replay = Replay(
self.region_name,
name,
description,
source_arn,
start_time,
end_time,
destination,
)
self.replays[name] = replay
replay.replay_events(archive)
return {
"ReplayArn": replay.arn,
"ReplayStartTime": replay.start_time,
"State": ReplayState.STARTING.value, # the replay will be done before returning the response
}
def describe_replay(self, name):
replay = self._get_replay(name)
return replay.describe()
def list_replays(self, name_prefix, source_arn, state):
if [name_prefix, source_arn, state].count(None) < 2:
raise ValidationException(
"At most one filter is allowed for ListReplays. "
"Use either : State, EventSourceArn, or NamePrefix."
)
valid_states = sorted([item.value for item in ReplayState])
if state and state not in valid_states:
raise ValidationException(
"1 validation error detected: "
"Value '{0}' at 'state' failed to satisfy constraint: "
"Member must satisfy enum value set: "
"[{1}]".format(state, ", ".join(valid_states))
)
if [name_prefix, source_arn, state].count(None) == 3:
return [replay.describe_short() for replay in self.replays.values()]
result = []
for replay in self.replays.values():
if name_prefix and replay.name.startswith(name_prefix):
result.append(replay.describe_short())
elif source_arn and replay.source_arn == source_arn:
result.append(replay.describe_short())
elif state and replay.state == state:
result.append(replay.describe_short())
return result
def cancel_replay(self, name):
replay = self._get_replay(name)
# replays in the state 'COMPLETED' can't be canceled,
# but the implementation is done synchronously,
# so they are done right after the start
if replay.state not in [
ReplayState.STARTING,
ReplayState.RUNNING,
ReplayState.COMPLETED,
]:
raise IllegalStatusException(
"Replay {} is not in a valid state for this operation.".format(name)
)
replay.state = ReplayState.CANCELLED
return {"ReplayArn": replay.arn, "State": ReplayState.CANCELLING.value}
def create_connection(self, name, description, authorization_type, auth_parameters):
connection = Connection(
name, self.region_name, description, authorization_type, auth_parameters
)
self.connections[name] = connection
return connection
def update_connection(self, *, name, **kwargs):
connection = self.connections.get(name)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
for attr, value in kwargs.items():
if value is not None and hasattr(connection, attr):
setattr(connection, attr, value)
return connection.describe_short()
def list_connections(self):
return self.connections.values()
def describe_connection(self, name):
"""
Retrieves details about a connection.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeConnection.html
Args:
name: The name of the connection to retrieve.
Raises:
ResourceNotFoundException: When the connection is not present.
Returns:
dict
"""
connection = self.connections.get(name)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
return connection.describe()
def delete_connection(self, name):
"""
Deletes a connection.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteConnection.html
Args:
name: The name of the connection to delete.
Raises:
ResourceNotFoundException: When the connection is not present.
Returns:
dict
"""
connection = self.connections.pop(name, None)
if not connection:
raise ResourceNotFoundException(
"Connection '{}' does not exist.".format(name)
)
return connection.describe_short()
def create_api_destination(
self,
name,
description,
connection_arn,
invocation_endpoint,
invocation_rate_limit_per_second,
http_method,
):
"""
Creates an API destination, which is an HTTP invocation endpoint configured as a target for events.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_CreateApiDestination.html
Returns:
dict
"""
destination = Destination(
name=name,
region_name=self.region_name,
description=description,
connection_arn=connection_arn,
invocation_endpoint=invocation_endpoint,
invocation_rate_limit_per_second=invocation_rate_limit_per_second,
http_method=http_method,
)
self.destinations[name] = destination
return destination.describe_short()
def list_api_destinations(self):
return self.destinations.values()
def describe_api_destination(self, name):
"""
Retrieves details about an API destination.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DescribeApiDestination.html
Args:
name: The name of the API destination to retrieve.
Returns:
dict
"""
destination = self.destinations.get(name)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
return destination.describe()
def update_api_destination(self, *, name, **kwargs):
"""
Creates an API destination, which is an HTTP invocation endpoint configured as a target for events.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_UpdateApiDestination.html
Returns:
dict
"""
destination = self.destinations.get(name)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
for attr, value in kwargs.items():
if value is not None and hasattr(destination, attr):
setattr(destination, attr, value)
return destination.describe_short()
def delete_api_destination(self, name):
"""
Deletes the specified API destination.
Docs:
https://docs.aws.amazon.com/eventbridge/latest/APIReference/API_DeleteApiDestination.html
Args:
name: The name of the destination to delete.
Raises:
ResourceNotFoundException: When the destination is not present.
Returns:
dict
"""
destination = self.destinations.pop(name, None)
if not destination:
raise ResourceNotFoundException(
"An api-destination '{}' does not exist.".format(name)
)
return {}
events_backends = {}
for region in Session().get_available_regions("events"):
events_backends[region] = EventsBackend(region)
for region in Session().get_available_regions("events", partition_name="aws-us-gov"):
events_backends[region] = EventsBackend(region)
for region in Session().get_available_regions("events", partition_name="aws-cn"):
events_backends[region] = EventsBackend(region)
| 33.717822
| 122
| 0.594431
|
928c7fd1f8ed0dd9a409c3faef0da00a25f58723
| 7,293
|
py
|
Python
|
test/test_tensorflow_graph_pad_conv.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
test/test_tensorflow_graph_pad_conv.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
test/test_tensorflow_graph_pad_conv.py
|
deb-intel/LPOTtest
|
f7b7524c733e581668d15192b69f9d9a7ca5222d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
import yaml
import tensorflow as tf
from tensorflow.python.framework import graph_util
from lpot.adaptor.tf_utils.util import disable_random
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: input
outputs: op_to_store
device: cpu
quantization:
model_wise:
weight:
granularity: per_tensor
scheme: sym
dtype: int8
algorithm: kl
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: mse
accuracy_criterion:
relative: 0.01
exit_policy:
performance_only: True
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
class TestFoldPadConv(unittest.TestCase):
@classmethod
def setUpClass(self):
build_fake_yaml()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
@disable_random()
def test_fold_pad_conv(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(x, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed, name='op_to_store')
out_name = relu.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from lpot.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer()
found_pad = False
if tf.__version__ >= "2.0.0":
for i in output_graph.graph_def.node:
if i.op == 'Pad':
found_pad = True
break
self.assertEqual(found_pad, True)
@disable_random()
def test_fold_pad_conv2(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(x, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
paddings2 = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad2 = tf.pad(x, paddings2, "CONSTANT")
conv_weights2 = tf.compat.v1.get_variable("weight2", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv2 = tf.nn.conv2d(x_pad2, conv_weights2, strides=[1, 2, 2, 1], padding="VALID")
normed2 = tf.compat.v1.layers.batch_normalization(conv2)
relu2 = tf.nn.relu(normed2)
add = tf.math.add(relu, relu2, name='op_to_store')
out_name = add.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from lpot.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer()
found_pad = False
if tf.__version__ >= "2.0.0":
for i in output_graph.graph_def.node:
if i.op == 'Pad':
found_pad = True
break
self.assertEqual(found_pad, True)
@disable_random()
def test_fold_pad_conv3(self):
x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
x_pad = tf.pad(x, paddings, "CONSTANT")
conv_weights = tf.compat.v1.get_variable("weight", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
normed = tf.compat.v1.layers.batch_normalization(conv)
relu = tf.nn.relu(normed)
conv_weights2 = tf.compat.v1.get_variable("weight2", [3, 3, 16, 16],
initializer=tf.compat.v1.random_normal_initializer())
conv2 = tf.nn.conv2d(x, conv_weights2, strides=[1, 2, 2, 1], padding="SAME")
normed2 = tf.compat.v1.layers.batch_normalization(conv2)
relu2 = tf.nn.relu(normed2)
add = tf.math.add(relu, relu2, name='op_to_store')
out_name = add.name.split(':')[0]
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
output_graph_def = graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=sess.graph_def,
output_node_names=[out_name])
from lpot.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', shape=(100, 56, 56, 16), label=True)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.model = output_graph_def
output_graph = quantizer()
found_pad = False
if tf.__version__ >= "2.0.0":
for i in output_graph.graph_def.node:
if i.op == 'Pad':
found_pad = True
break
self.assertEqual(found_pad, True)
if __name__ == "__main__":
unittest.main()
| 41.913793
| 103
| 0.566296
|
28949667ce219452bc7902e75750b933d46409c9
| 317
|
py
|
Python
|
sapextractor/factory.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | null | null | null |
sapextractor/factory.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | null | null | null |
sapextractor/factory.py
|
aarkue/sap-meta-explorer
|
613bf657bbaa72a3781a84664e5de7626516532f
|
[
"Apache-2.0"
] | null | null | null |
from sapextractor.database_connection import factory as conn_factory
from sapextractor.algo import factory as algo_factory
def apply(db_type, db_con_arg, process, ext_type, ext_arg):
print(process)
con = conn_factory.apply(db_type, db_con_arg)
return algo_factory.apply(con, process, ext_type, ext_arg)
| 35.222222
| 68
| 0.798107
|
10eb809e38f62b27a9fd8f3084eb1e1b1061d198
| 83
|
py
|
Python
|
unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
|
byte-trading/unicorn-binance-websocket-api
|
3aa2f87ef107db75c0c1aaebbcc6a8564357b256
|
[
"MIT"
] | null | null | null |
unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
|
byte-trading/unicorn-binance-websocket-api
|
3aa2f87ef107db75c0c1aaebbcc6a8564357b256
|
[
"MIT"
] | null | null | null |
unicorn_binance_websocket_api/unicorn_binance_websocket_api_manager.py
|
byte-trading/unicorn-binance-websocket-api
|
3aa2f87ef107db75c0c1aaebbcc6a8564357b256
|
[
"MIT"
] | null | null | null |
# backward compatibility <= 1.35.0
from .manager import BinanceWebSocketApiManager
| 27.666667
| 47
| 0.819277
|
ef944e45ac26c99652ffd616ddbf206a9ff3004b
| 7,943
|
py
|
Python
|
python3/madasterapi/model/account_response.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | 2
|
2021-04-13T12:19:26.000Z
|
2021-09-13T15:40:44.000Z
|
python3/madasterapi/model/account_response.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | null | null | null |
python3/madasterapi/model/account_response.py
|
Madaster/examples
|
bd2e8e464172e0d47cac8ed1672501a24ba624c3
|
[
"MIT"
] | null | null | null |
"""
Madaster Private API - Build: 8815
Welcome to the **Madaster Private API** endpoint. This endpoint can be used to interact with the Madaster Platform and its resources. This API does not fully cover all functionality of the platform yet, please see below for the available functions and what they can be used for. For detailed information about the platform and this API, please refer to the [Madaster Documentation](https://docs.madaster.com) or the [Madaster API Documentation](https://docs.madaster.com/api).<br/><br/>To access these resources, you need an authorization token. If you do not have one yet, see the chapter about Authorization in the [API documentation](https://docs.madaster.com/api). This token should be sent as a header with the name 'X-API-Key', which will authenticate the request with the token. The documentation below specifies which requests are available and which responses they might produce.<br/><br/>This API can be reached at the endpoint: **[https://api.madaster.com/](https://api.madaster.com/)** # noqa: E501
The version of the OpenAPI document: v3.0
Contact: service@madaster.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from madasterapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from madasterapi.model.account_type import AccountType
globals()['AccountType'] = AccountType
class AccountResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'name': (str, none_type,), # noqa: E501
'account_type': (AccountType,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
'account_type': 'accountType', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AccountResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): The account identifier. [optional] # noqa: E501
name (str, none_type): The account name. [optional] # noqa: E501
account_type (AccountType): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 44.374302
| 1,015
| 0.607075
|
10c75cb45373b832a70aee9a8a77d1549bd75f6c
| 5,398
|
py
|
Python
|
ceph_installer/tests/controllers/test_tasks.py
|
ceph/ceph-installer
|
79988577176d6ea628d6c4f50ed3f55831012a34
|
[
"MIT"
] | 16
|
2016-02-26T01:06:13.000Z
|
2020-03-04T02:04:35.000Z
|
ceph_installer/tests/controllers/test_tasks.py
|
ceph/ceph-installer
|
79988577176d6ea628d6c4f50ed3f55831012a34
|
[
"MIT"
] | 73
|
2016-02-11T14:56:25.000Z
|
2021-11-25T02:09:04.000Z
|
ceph_installer/tests/controllers/test_tasks.py
|
ceph/mariner-installer
|
79988577176d6ea628d6c4f50ed3f55831012a34
|
[
"MIT"
] | 7
|
2016-02-12T17:49:02.000Z
|
2021-01-27T10:49:14.000Z
|
from datetime import datetime
from uuid import uuid4
from ceph_installer.models import Task
class TestTasksController(object):
def create_task(self, **kw):
Task(
identifier=kw.get('identifier', str(uuid4())),
endpoint='/api/rgw/',
command='ansible-playbook -i "rgw.example.com," playbook.yml',
stderr='',
stdout='',
started=kw.get('started', datetime.utcnow()),
ended=kw.get('ended', datetime.utcnow()),
succeeded=True
)
def test_index_get_no_tasks(self, session):
result = session.app.get("/api/tasks/")
assert result.json == []
def test_index_get_single_task(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert len(result.json) == 1
def test_index_get_single_task_identifier(self, session):
self.create_task(identifier='uuid-1')
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['identifier'] == 'uuid-1'
def test_index_get_single_task_endpoint(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['endpoint'] == '/api/rgw/'
def test_index_get_single_task_command(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['command'] == 'ansible-playbook -i "rgw.example.com," playbook.yml'
def test_index_get_single_task_stdout(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['stdout'] == ''
def test_index_get_single_task_stderr(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['stderr'] == ''
def test_index_get_single_task_started(self, session):
started = datetime.utcnow()
self.create_task(started=started)
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['started'] == started.isoformat().replace('T', ' ')
def test_index_get_single_task_ended(self, session):
ended = datetime.utcnow()
self.create_task(ended=ended)
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['ended'] == ended.isoformat().replace('T', ' ')
def test_index_get_single_task_succeeded(self, session):
self.create_task()
session.commit()
result = session.app.get("/api/tasks/")
assert result.json[0]['succeeded'] is True
class TestTaskController(object):
def create_task(self, **kw):
Task(
identifier=kw.get('identifier', str(uuid4())),
endpoint='/api/rgw/',
command='ansible-playbook -i "rgw.example.com," playbook.yml',
stderr='',
stdout='',
started=kw.get('started', datetime.utcnow()),
ended=kw.get('ended', datetime.utcnow()),
succeeded=True
)
def test_task_not_found(self, session):
result = session.app.get(
'/api/tasks/1234-asdf-1234-asdf/',
expect_errors=True)
assert result.status_int == 404
def test_task_exists_with_metadata(self, session):
identifier = '1234-asdf-1234-asdf'
self.create_task(identifier=identifier)
result = session.app.get('/api/tasks/1234-asdf-1234-asdf/')
assert result.json['identifier']
class TestTaskControllerRequests(object):
def create_task(self, **kw):
Task(
request=kw.get('request'),
identifier=kw.get('identifier', '1234-asdf-1234-asdf'),
endpoint='/api/rgw/',
command='ansible-playbook -i "rgw.example.com," playbook.yml',
stderr='',
stdout='',
started=kw.get('started', datetime.utcnow()),
ended=kw.get('ended', datetime.utcnow()),
succeeded=True
)
def test_request_is_none(self, session):
self.create_task()
result = session.app.get(
'/api/tasks/1234-asdf-1234-asdf/',
expect_errors=True)
        print(result.json)
assert result.json['user_agent'] == ''
assert result.json['http_method'] == ''
assert result.json['request'] == ''
def test_request_with_valid_method(self, fake, session):
fake_request = fake(method='POST')
self.create_task(request=fake_request)
result = session.app.get('/api/tasks/1234-asdf-1234-asdf/')
assert result.json['http_method'] == 'POST'
def test_request_with_valid_body(self, fake, session):
fake_request = fake(body='{"host": "example.com"}')
self.create_task(request=fake_request)
result = session.app.get('/api/tasks/1234-asdf-1234-asdf/')
assert result.json['request'] == '{"host": "example.com"}'
def test_request_with_valid_user_agent(self, fake, session):
fake_request = fake(user_agent='Mozilla/5.0')
self.create_task(request=fake_request)
result = session.app.get('/api/tasks/1234-asdf-1234-asdf/')
assert result.json['user_agent'] == 'Mozilla/5.0'
| 35.513158
| 97
| 0.607818
|
beaa6e7dcb4fd89545e8acf61e26958347409499
| 777
|
py
|
Python
|
samples/python2/mser.py
|
bonanza-market/opencv
|
6550cb2a90b2b074234a3b2ae354d01a1e55fd2b
|
[
"BSD-3-Clause"
] | 144
|
2015-01-15T03:38:44.000Z
|
2022-02-17T09:07:52.000Z
|
samples/python2/mser.py
|
bonanza-market/opencv
|
6550cb2a90b2b074234a3b2ae354d01a1e55fd2b
|
[
"BSD-3-Clause"
] | 9
|
2015-09-09T06:51:46.000Z
|
2020-06-17T14:10:10.000Z
|
samples/python2/mser.py
|
bonanza-market/opencv
|
6550cb2a90b2b074234a3b2ae354d01a1e55fd2b
|
[
"BSD-3-Clause"
] | 73
|
2015-06-20T15:59:27.000Z
|
2020-03-15T22:43:36.000Z
|
#!/usr/bin/env python
'''
MSER detector demo
==================
Usage:
------
mser.py [<video source>]
Keys:
-----
ESC - exit
'''
import numpy as np
import cv2
import video
if __name__ == '__main__':
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
cam = video.create_capture(video_src)
mser = cv2.MSER()
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()
regions = mser.detect(gray, None)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
cv2.imshow('img', vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
| 18.069767
| 70
| 0.544402
|
55ff1b78f2053054f32802d609009c846a1fa086
| 2,866
|
py
|
Python
|
python/game-idea/main.py
|
ASMlover/study
|
5878f862573061f94c5776a351e30270dfd9966a
|
[
"BSD-2-Clause"
] | 22
|
2015-05-18T07:04:36.000Z
|
2021-08-02T03:01:43.000Z
|
python/game-idea/main.py
|
ASMlover/study
|
5878f862573061f94c5776a351e30270dfd9966a
|
[
"BSD-2-Clause"
] | 1
|
2017-08-31T22:13:57.000Z
|
2017-09-05T15:00:25.000Z
|
python/game-idea/main.py
|
ASMlover/study
|
5878f862573061f94c5776a351e30270dfd9966a
|
[
"BSD-2-Clause"
] | 6
|
2015-06-06T07:16:12.000Z
|
2021-07-06T13:45:56.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#  notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#  the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import trigger as tm
class Space(object):
def __init__(self):
pass
def monster_count(self):
return 0
class TriggerManager(object):
def __init__(self, space=None):
self.space = space
self.triggers = {}
self.activate_triggers = set()
def register(self, trigger_no, infos):
trigger_name = 'Trigger%d' % trigger_no
        trigger_type = getattr(tm, trigger_name, None)
if trigger_type:
trigger = trigger_type(self.space, infos)
self.triggers[trigger_no] = trigger
if trigger.activatiable():
self.activate_triggers.add(trigger_no)
def unregister(self, trigger_no):
self.triggers.pop(trigger_no, None)
def on_event_notify(self, notify, *args):
completed_triggers = []
for trigger_no in self.activate_triggers:
trigger = self.triggers.get(trigger_no, None)
if not trigger:
continue
on_event = getattr(trigger, notify, None)
if on_event:
on_event(*args)
if trigger.is_completed():
completed_triggers.append(trigger_no)
        for no in completed_triggers:
            self.activate_triggers.discard(no)
if __name__ == '__main__':
space = Space()
trigger_mgr = TriggerManager(space)
trigger_mgr.register(1101, {'cond': 0, 'action': 'all monsters dead !!!'})
trigger_mgr.on_event_notify('on_monster_die')
| 36.278481
| 78
| 0.693301
|
64afce562e4810200045ab9a731f6f158c821182
| 10,165
|
py
|
Python
|
src/test/python/apache/aurora/client/commands/test_hooks.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
src/test/python/apache/aurora/client/commands/test_hooks.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
src/test/python/apache/aurora/client/commands/test_hooks.py
|
wickman/incubator-aurora
|
9906d217093568ed4c9cfe620862818f15ce4150
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2013 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import unittest
from apache.aurora.client.commands.core import create
from apache.aurora.client.commands.util import AuroraClientCommandTest
from apache.aurora.client.config import AuroraConfig, GlobalHookRegistry
from apache.aurora.client.hooks.hooked_api import HookedAuroraClientAPI
from twitter.common import app
from twitter.common.contextutil import temporary_file
from gen.apache.aurora.ttypes import (
AssignedTask,
Identity,
Response,
ResponseCode,
Result,
ScheduledTask,
ScheduleStatus,
ScheduleStatusResult,
TaskEvent,
TaskQuery,
)
from mock import Mock, patch
from pystachio.config import Config
class CreateHookForTesting(object):
def __init__(self, succeed):
self.created_jobs = []
self.succeed = succeed
def pre_create_job(self, api, config):
self.created_jobs.append(config)
return self.succeed
class TestClientCreateCommand(AuroraClientCommandTest):
def setUp(self):
GlobalHookRegistry.reset()
@classmethod
def setup_mock_options(cls):
"""set up to get a mock options object."""
mock_options = Mock()
mock_options.json = False
mock_options.bindings = {}
mock_options.open_browser = False
mock_options.cluster = None
mock_options.wait_until = 'RUNNING' # or 'FINISHED' for other tests
mock_options.disable_all_hooks_reason = None
return mock_options
@classmethod
def create_mock_task(cls, task_id, instance_id, initial_time, status):
mock_task = Mock(spec=ScheduledTask)
mock_task.assignedTask = Mock(spec=AssignedTask)
mock_task.assignedTask.taskId = task_id
mock_task.assignedTask.instanceId = instance_id
mock_task.status = status
mock_task_event = Mock(spec=TaskEvent)
mock_task_event.timestamp = initial_time
mock_task.taskEvents = [mock_task_event]
return mock_task
@classmethod
def create_mock_status_query_result(cls, scheduleStatus):
mock_query_result = cls.create_simple_success_response()
mock_query_result.result.scheduleStatusResult = Mock(spec=ScheduleStatusResult)
if scheduleStatus == ScheduleStatus.INIT:
# status query result for before job is launched.
mock_query_result.result.scheduleStatusResult.tasks = []
else:
mock_task_one = cls.create_mock_task('hello', 0, 1000, scheduleStatus)
mock_task_two = cls.create_mock_task('hello', 1, 1004, scheduleStatus)
mock_query_result.result.scheduleStatusResult.tasks = [mock_task_one, mock_task_two]
return mock_query_result
@classmethod
def create_mock_query(cls):
return TaskQuery(owner=Identity(role=cls.TEST_ROLE), environment=cls.TEST_ENV,
jobName=cls.TEST_JOB)
@classmethod
def get_createjob_response(cls):
# Then, we call api.create_job(config)
return cls.create_simple_success_response()
@classmethod
def get_failed_createjob_response(cls):
return cls.create_error_response()
@classmethod
def assert_create_job_called(cls, mock_api):
# Check that create_job was called exactly once, with an AuroraConfig parameter.
assert mock_api.create_job.call_count == 1
assert isinstance(mock_api.create_job.call_args_list[0][0][0], AuroraConfig)
@classmethod
def assert_scheduler_called(cls, mock_api, mock_query, num_queries):
# scheduler.scheduler() is called once, as a part of the handle_open call.
assert mock_api.scheduler_proxy.getTasksStatus.call_count == num_queries
mock_api.scheduler_proxy.getTasksStatus.assert_called_with(mock_query)
def test_create_job_hook_called(self):
"""Run a test of the "create" command against a mocked API;
verifies that a required hook runs, even though the config doesn't mention it.
"""
# Create a hook on "create_job" that just adds something to a list in the test.
# Patch in HookedAuroraClientAPI to replace the UnhookedAuroraClientAPI with a mock.
mock_options = self.setup_mock_options()
hook = CreateHookForTesting(True)
GlobalHookRegistry.register_global_hook(hook)
# create first calls get_job_config, which calls get_config. As long as we've got the options
# set up correctly, this should work.
# Next, create gets an API object via make_client. We need to replace that with a mock API.
(mock_api, mock_scheduler_proxy) = self.create_mock_api()
with contextlib.nested(
patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
patch('twitter.common.app.get_options', return_value=mock_options)):
mock_query = self.create_mock_query()
      mock_scheduler_proxy.createJob.return_value = self.get_createjob_response()
mock_scheduler_proxy.getTasksStatus.side_effect = [
self.create_mock_status_query_result(ScheduleStatus.INIT),
self.create_mock_status_query_result(ScheduleStatus.RUNNING)
]
# Finally, it calls the monitor to watch and make sure the jobs started;
# but we already set that up in the side-effects list for the query mock.
# This is the real test: invoke create as if it had been called by the command line.
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
create(['west/mchucarroll/test/hello', fp.name])
# Now check that the right API calls got made.
assert mock_scheduler_proxy.createJob.call_count == 1
assert len(hook.created_jobs) == 1
def test_create_job_hook_aborts(self):
"""Run a test of the "create" command against a mocked API;
    verifies that a pre-create hook which returns False aborts the call before the job is created.
"""
# Create a hook on "create_job" that just adds something to a list in the test.
# Patch in HookedAuroraClientAPI to replace the UnhookedAuroraClientAPI with a mock.
mock_options = self.setup_mock_options()
hook = CreateHookForTesting(False)
GlobalHookRegistry.register_global_hook(hook)
# create first calls get_job_config, which calls get_config. As long as we've got the options
# set up correctly, this should work.
# Next, create gets an API object via make_client. We need to replace that with a mock API.
(mock_api, mock_scheduler_proxy) = self.create_mock_api()
with contextlib.nested(
patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
patch('twitter.common.app.get_options', return_value=mock_options)):
mock_query = self.create_mock_query()
      mock_scheduler_proxy.createJob.return_value = self.get_createjob_response()
mock_scheduler_proxy.getTasksStatus.side_effect = [
self.create_mock_status_query_result(ScheduleStatus.INIT),
self.create_mock_status_query_result(ScheduleStatus.RUNNING)
]
# Finally, it calls the monitor to watch and make sure the jobs started;
# but we already set that up in the side-effects list for the query mock.
# This is the real test: invoke create as if it had been called by the command line.
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
self.assertRaises(HookedAuroraClientAPI.PreHooksStoppedCall, create,
['west/mchucarroll/test/hello', fp.name])
# Now check that the right API calls got made.
assert mock_scheduler_proxy.createJob.call_count == 0
assert len(hook.created_jobs) == 1
def test_block_hooks(self):
"""Run a test of the "create" command against a mocked API;
    verifies that registered hooks are skipped when all hooks are disabled via the command line options.
"""
# Create a hook on "create_job" that just adds something to a list in the test.
# Patch in HookedAuroraClientAPI to replace the UnhookedAuroraClientAPI with a mock.
mock_options = self.setup_mock_options()
hook = CreateHookForTesting(True)
GlobalHookRegistry.register_global_hook(hook)
mock_options.disable_all_hooks_reason = "Because I said so."
# create first calls get_job_config, which calls get_config. As long as we've got the options
# set up correctly, this should work.
# Next, create gets an API object via make_client. We need to replace that with a mock API.
(mock_api, mock_scheduler_proxy) = self.create_mock_api()
with contextlib.nested(
patch('apache.aurora.client.factory.CLUSTERS', new=self.TEST_CLUSTERS),
        patch('apache.aurora.client.api.SchedulerProxy', return_value=mock_scheduler_proxy),
patch('twitter.common.app.get_options', return_value=mock_options)):
mock_query = self.create_mock_query()
      mock_scheduler_proxy.createJob.return_value = self.get_createjob_response()
mock_scheduler_proxy.getTasksStatus.side_effect = [
self.create_mock_status_query_result(ScheduleStatus.INIT),
self.create_mock_status_query_result(ScheduleStatus.RUNNING)
]
# Finally, it calls the monitor to watch and make sure the jobs started;
# but we already set that up in the side-effects list for the query mock.
# This is the real test: invoke create as if it had been called by the command line.
with temporary_file() as fp:
fp.write(self.get_valid_config())
fp.flush()
create(['west/mchucarroll/test/hello', fp.name])
# Now check that the right API calls got made.
assert mock_scheduler_proxy.createJob.call_count == 1
assert len(hook.created_jobs) == 0
| 40.987903
| 97
| 0.742056
|
c646c7b3541053893948864e59933756383f6dc0
| 2,235
|
py
|
Python
|
bevy/app/agents/hooks.py
|
ZechCodes/bevy.app
|
c56bd1dba456969807abd8b71f8f02a68c23025d
|
[
"MIT"
] | null | null | null |
bevy/app/agents/hooks.py
|
ZechCodes/bevy.app
|
c56bd1dba456969807abd8b71f8f02a68c23025d
|
[
"MIT"
] | null | null | null |
bevy/app/agents/hooks.py
|
ZechCodes/bevy.app
|
c56bd1dba456969807abd8b71f8f02a68c23025d
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from asyncio import gather
from collections import defaultdict
from copy import deepcopy
from inspect import isawaitable, getmro
from typing import Awaitable, Callable, cast, Generator, Iterable, ParamSpec, TypeVar
P = ParamSpec("P")
R = TypeVar("R")
class HookClassRepository:
def __init__(self):
self.repo = {}
def __get__(self, _, owner: Hookable) -> dict[str, set[str]]:
if id(owner) not in self.repo:
self.repo[id(owner)] = self.build_hook_dict(owner)
return self.repo[id(owner)]
def build_hook_dict(self, owner):
for super_ in getmro(owner):
if super_ is not owner and hasattr(super_, "__bevy_hooks__"):
return deepcopy(super_.__bevy_hooks__)
return defaultdict(set)
class Hookable:
__bevy_hooks__: dict[str, set[str]] = HookClassRepository()
def dispatch_to_hook(
self, hook_name: str, *args: P.args, **kwargs: P.kwargs
) -> Awaitable:
return gather(
*self.__run_callbacks(self.__get_callbacks(hook_name), *args, **kwargs)
)
    def __get_callbacks(self, hook_name: str) -> Generator[Callable[P, R], None, None]:
yield from (
getattr(self, name) for name in type(self).__bevy_hooks__[hook_name]
)
def __run_callbacks(
self, callbacks: Iterable[Callable[P, R]], *args: P.args, **kwargs: P.kwargs
    ) -> Generator[Awaitable, None, None]:
yield from (
ret
for callback in callbacks
if isawaitable(ret := self.__run_callback(callback, *args, **kwargs))
)
def __run_callback(self, callback, *args, **kwargs) -> Awaitable | None:
return callback(*args, **kwargs)
class Hook:
def __init__(self, hook_name: str):
self._hook_name = hook_name
self._func = None
def __call__(self, func: Callable[P, R]) -> Callable[P, R]:
self._func = func
return self
def __set_name__(self, owner: Hookable, name: str):
owner.__bevy_hooks__[self._hook_name].add(name)
setattr(owner, name, self._func)
def hook(hook_name: str) -> Callable[P, R]:
return cast(Callable[P, R], Hook(hook_name))
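# A minimal, hypothetical usage sketch (not part of the original module): a Hookable
# subclass registers an async callback under "on_start" with the hook decorator, and
# dispatch_to_hook awaits every registered callback. All names below are illustrative.
class _ExampleAgent(Hookable):
    @hook("on_start")
    async def _greet(self, name: str) -> None:
        print(f"hello {name}")
async def _example_dispatch() -> None:
    await _ExampleAgent().dispatch_to_hook("on_start", "world")
# Running _example_dispatch() in an event loop (e.g. asyncio.run) prints "hello world".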
| 29.8
| 87
| 0.640268
|
2263c68d084352050a83e2bb94465b9c0f097aff
| 7,004
|
py
|
Python
|
desktop/core/ext-py/Twisted/twisted/internet/pollreactor.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
desktop/core/ext-py/Twisted/twisted/internet/pollreactor.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
desktop/core/ext-py/Twisted/twisted/internet/pollreactor.py
|
civascu/hue
|
82f2de44789ff5a981ed725175bae7944832d1e9
|
[
"Apache-2.0"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A poll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import pollreactor
pollreactor.install()
Maintainer: Itamar Shtull-Trauring
"""
# System imports
import errno, sys
from select import error as SelectError, poll
from select import POLLIN, POLLOUT, POLLHUP, POLLERR, POLLNVAL
from zope.interface import implements
# Twisted imports
from twisted.python import log
from twisted.internet import main, posixbase, error
from twisted.internet.interfaces import IReactorFDSet
POLL_DISCONNECTED = (POLLHUP | POLLERR | POLLNVAL)
class PollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses poll(2).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize polling object, file descriptor tracking dictionaries, and
the base class.
"""
self._poller = poll()
self._selectables = {}
self._reads = {}
self._writes = {}
posixbase.PosixReactorBase.__init__(self)
def _updateRegistration(self, fd):
"""Register/unregister an fd with the poller."""
try:
self._poller.unregister(fd)
except KeyError:
pass
mask = 0
if fd in self._reads:
mask = mask | POLLIN
if fd in self._writes:
mask = mask | POLLOUT
if mask != 0:
self._poller.register(fd, mask)
else:
if fd in self._selectables:
del self._selectables[fd]
def _dictRemove(self, selectable, mdict):
try:
# the easy way
fd = selectable.fileno()
# make sure the fd is actually real. In some situations we can get
# -1 here.
mdict[fd]
except:
# the hard way: necessary because fileno() may disappear at any
# moment, thanks to python's underlying sockets impl
for fd, fdes in self._selectables.items():
if selectable is fdes:
break
else:
# Hmm, maybe not the right course of action? This method can't
# fail, because it happens inside error detection...
return
if fd in mdict:
del mdict[fd]
self._updateRegistration(fd)
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
fd = reader.fileno()
if fd not in self._reads:
self._selectables[fd] = reader
self._reads[fd] = 1
self._updateRegistration(fd)
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
fd = writer.fileno()
if fd not in self._writes:
self._selectables[fd] = writer
self._writes[fd] = 1
self._updateRegistration(fd)
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
return self._dictRemove(reader, self._reads)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
return self._dictRemove(writer, self._writes)
def removeAll(self):
"""Remove all selectables, and return a list of them."""
if self.waker is not None:
self.removeReader(self.waker)
result = self._selectables.values()
fds = self._selectables.keys()
self._reads.clear()
self._writes.clear()
self._selectables.clear()
for fd in fds:
self._poller.unregister(fd)
if self.waker is not None:
self.addReader(self.waker)
return result
def doPoll(self, timeout):
"""Poll the poller for new events."""
if timeout is not None:
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
l = self._poller.poll(timeout)
        except SelectError as e:
            if e.args[0] == errno.EINTR:
return
else:
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
why = None
inRead = False
if event & POLL_DISCONNECTED and not (event & POLLIN):
why = main.CONNECTION_LOST
else:
try:
if event & POLLIN:
why = selectable.doRead()
inRead = True
if not why and event & POLLOUT:
why = selectable.doWrite()
inRead = False
if not selectable.fileno() == fd:
why = error.ConnectionFdescWentAway('Filedescriptor went away')
inRead = False
except:
log.deferr()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def install():
"""Install the poll() reactor."""
p = PollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["PollReactor", "install"]
| 32.12844
| 83
| 0.602227
|
ae14ade4b9a99521ad80e408e0d523f519e11c82
| 2,918
|
py
|
Python
|
thumbor/engines/gif.py
|
ustun/thumbor
|
2641b17a6cea5d3ebf20abd1d2b464565c1b8b97
|
[
"MIT"
] | 1
|
2019-03-05T13:30:19.000Z
|
2019-03-05T13:30:19.000Z
|
thumbor/engines/gif.py
|
ustun/thumbor
|
2641b17a6cea5d3ebf20abd1d2b464565c1b8b97
|
[
"MIT"
] | null | null | null |
thumbor/engines/gif.py
|
ustun/thumbor
|
2641b17a6cea5d3ebf20abd1d2b464565c1b8b97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import re
from subprocess import Popen, PIPE
from thumbor.engines.pil import Engine as PILEngine
GIFSICLE_SIZE_REGEX = re.compile(r'(?:logical\sscreen\s(\d+x\d+))')
GIFSICLE_IMAGE_COUNT_REGEX = re.compile(r'(?:(\d+)\simage)')
class Engine(PILEngine):
@property
def size(self):
return self.image_size
def run_gifsicle(self, command):
p = Popen([self.context.server.gifsicle_path] + command.split(' '), stdout=PIPE, stdin=PIPE, stderr=PIPE)
stdout_data = p.communicate(input=self.buffer)[0]
return stdout_data
def is_multiple(self):
return self.frame_count > 1
def update_image_info(self):
self._is_multiple = False
result = self.run_gifsicle('--info')
size = GIFSICLE_SIZE_REGEX.search(result)
self.image_size = size.groups()[0].split('x')
self.image_size[0], self.image_size[1] = int(self.image_size[0]), int(self.image_size[1])
count = GIFSICLE_IMAGE_COUNT_REGEX.search(result)
self.frame_count = int(count.groups()[0])
def load(self, buffer, extension):
self.extension = self.get_mimetype(buffer)
        self.buffer = buffer
self.operations = []
self.update_image_info()
def draw_rectangle(self, x, y, width, height):
raise NotImplementedError()
def resize(self, width, height):
if width == 0 and height == 0:
return
if width > 0 and height == 0:
arguments = "--resize-width %d" % width
elif height > 0 and width == 0:
arguments = "--resize-height %d" % height
else:
arguments = "--resize %dx%d" % (width, height)
self.operations.append(arguments)
def crop(self, left, top, right, bottom):
arguments = "--crop %d,%d-%d,%d" % (left, top, right, bottom)
self.operations.append(arguments)
self.flush_operations()
self.update_image_info()
def rotate(self, degrees):
if degrees not in [90, 180, 270]:
return
arguments = '--rotate-%d' % degrees
self.operations.append(arguments)
def flip_vertically(self):
self.operations.append('--flip-vertical')
def flip_horizontally(self):
self.operations.append('--flip-horizontal')
def flush_operations(self):
if not self.operations:
return
self.buffer = self.run_gifsicle(" ".join(self.operations))
self.operations = []
def read(self, extension=None, quality=None):
self.flush_operations()
return self.buffer
def convert_to_grayscale(self):
self.operations.append('--use-colormap gray')
| 29.77551
| 113
| 0.63194
|
57b7fafdadde1c9a1b526801b9000a04c4b48fb6
| 8,305
|
py
|
Python
|
FGSM-RS/src/base.py
|
MTandHJ/Pytorch-Robust
|
612ec7e71ca11d3b84aacb3aead3253436ba3d2b
|
[
"MIT"
] | 4
|
2021-11-20T13:11:20.000Z
|
2021-12-11T06:50:34.000Z
|
FGSM-RS/src/base.py
|
MTandHJ/PyTorch-Robust
|
1ba0a96953032db7952278ea49b2f1d5cdfac12c
|
[
"MIT"
] | null | null | null |
FGSM-RS/src/base.py
|
MTandHJ/PyTorch-Robust
|
1ba0a96953032db7952278ea49b2f1d5cdfac12c
|
[
"MIT"
] | null | null | null |
from typing import Callable, Any, Union, Optional, List, Tuple, Dict, Iterable, cast
import torch
import torch.nn as nn
import foolbox as fb
import os
from models.base import AdversarialDefensiveModule
from .utils import AverageMeter, ProgressMeter, timemeter, getLogger
from .loss_zoo import cross_entropy, kl_divergence, lploss
from .config import SAVED_FILENAME, PRE_BESTNAT, PRE_BESTROB, \
BOUNDS, PREPROCESSING, DEVICE
def enter_attack_exit(func) -> Callable:
def wrapper(attacker: "Adversary", *args, **kwargs):
attacker.model.attack(True)
results = func(attacker, *args, **kwargs)
attacker.model.attack(False)
return results
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
class Coach:
def __init__(
self, model: AdversarialDefensiveModule,
loss_func: Callable,
optimizer: torch.optim.Optimizer,
learning_policy: "learning rate policy",
device: torch.device = DEVICE
):
self.model = model
self.device = device
self.loss_func = loss_func
self.optimizer = optimizer
self.learning_policy = learning_policy
self.loss = AverageMeter("Loss")
self.acc = AverageMeter("Acc", fmt=".3%")
self.progress = ProgressMeter(self.loss, self.acc)
self._best_nat = 0.
self._best_rob = 0.
def save_best_nat(self, acc_nat: float, path: str, prefix: str = PRE_BESTNAT):
if acc_nat > self._best_nat:
self._best_nat = acc_nat
self.save(path, '_'.join((prefix, SAVED_FILENAME)))
return 1
else:
return 0
def save_best_rob(self, acc_rob: float, path: str, prefix: str = PRE_BESTROB):
if acc_rob > self._best_rob:
self._best_rob = acc_rob
self.save(path, '_'.join((prefix, SAVED_FILENAME)))
return 1
else:
return 0
def check_best(
self, acc_nat: float, acc_rob: float,
path: str, epoch: int = 8888
):
logger = getLogger()
if self.save_best_nat(acc_nat, path):
logger.debug(f"[Coach] Saving the best nat ({acc_nat:.3%}) model at epoch [{epoch}]")
if self.save_best_rob(acc_rob, path):
logger.debug(f"[Coach] Saving the best rob ({acc_rob:.3%}) model at epoch [{epoch}]")
def save(self, path: str, filename: str = SAVED_FILENAME) -> None:
torch.save(self.model.state_dict(), os.path.join(path, filename))
@timemeter("AdvTraining/Epoch")
def adv_train(
self,
trainloader: Iterable[Tuple[torch.Tensor, torch.Tensor]],
attacker: "Adversary",
*, epoch: int = 8888
) -> float:
self.progress.step() # reset the meter
for inputs, labels in trainloader:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
clipped = attacker(inputs, labels)
self.model.train()
outs = self.model(clipped)
loss = self.loss_func(outs, labels)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
accuracy_count = (outs.argmax(-1) == labels).sum().item()
self.loss.update(loss.item(), inputs.size(0), mode="mean")
self.acc.update(accuracy_count, inputs.size(0), mode="sum")
self.learning_policy.step() # update the learning rate
self.progress.display(epoch=epoch)
return self.loss.avg
class Adversary:
def __init__(
self, model: AdversarialDefensiveModule,
attacker: Callable, device: torch.device = DEVICE,
) -> None:
model.eval()
self.model = model
self.attacker = attacker
self.device = device
def attack(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
def __call__(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
return self.attack(inputs, targets)
class AdversaryForTrain(Adversary):
@enter_attack_exit
def attack(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
self.model.train() # FGSM-RS crafts adversarial samples in training mode !!!
return self.attacker(self.model, inputs, targets)
class AdversaryForValid(Adversary):
def attack(self, inputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
self.model.eval() # make sure in inference mode
return self.attacker(self.model, inputs, targets)
@torch.no_grad()
def accuracy(self, inputs: torch.Tensor, labels: torch.Tensor) -> int:
self.model.eval() # make sure in evaluation mode ...
predictions = self.model(inputs).argmax(dim=-1)
accuracy = (predictions == labels)
return cast(int, accuracy.sum().item())
def evaluate(
self,
dataloader: Iterable[Tuple[torch.Tensor, torch.Tensor]],
*, defending: bool = True
) -> Tuple[float, float]:
datasize = len(dataloader.dataset) # type: ignore
acc_nat = 0
acc_adv = 0
self.model.defend(defending) # enter 'defending' mode
self.model.eval()
for inputs, labels in dataloader:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
inputs_adv = self.attack(inputs, labels)
acc_nat += self.accuracy(inputs, labels)
acc_adv += self.accuracy(inputs_adv, labels)
return acc_nat / datasize, acc_adv / datasize
class FBAdversary(Adversary):
"""
    FBAdversary is mainly built on foolbox, in particular its PyTorchModel wrapper.
    model: make sure the model outputs logits, or adapt the attack accordingly.
    attacker: an attack implemented by foolbox, or a compatible callable
    device: ...
    bounds: typically [0, 1]
    preprocessing: the mean and std, similar to a normalizer
    criterion: typically the labels, in which case it is Misclassification;
        other criteria can be given to carry out targeted or black-box attacks.
"""
def __init__(
self, model: AdversarialDefensiveModule,
attacker: Callable, epsilon: Union[None, float, List[float]],
device: torch.device = DEVICE,
bounds: Tuple[float, float] = BOUNDS,
preprocessing: Optional[Dict] = PREPROCESSING
) -> None:
super(FBAdversary, self).__init__(
model=model, attacker=attacker, device=device
)
self.model.eval()
self.fmodel = fb.PyTorchModel(
model,
bounds=bounds,
preprocessing=preprocessing,
device=device
)
self.device = device
self.epsilon = epsilon
self.attacker = attacker
def attack(
self,
inputs: torch.Tensor,
criterion: Any,
epsilon: Union[None, float, List[float]] = None
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
if epsilon is None:
epsilon = self.epsilon
self.model.eval() # make sure in evaluation mode ...
return self.attacker(self.fmodel, inputs, criterion, epsilons=epsilon)
def __call__(
self,
inputs: torch.Tensor, criterion: Any,
epsilon: Optional[float] = None
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
return self.attack(inputs, criterion, epsilon)
class FBDefense:
def __init__(
self,
model: nn.Module,
device: torch.device = DEVICE,
bounds: Tuple[float, float] = BOUNDS,
preprocessing: Optional[Dict] = PREPROCESSING
) -> None:
self.rmodel = fb.PyTorchModel(
model,
bounds=bounds,
preprocessing=preprocessing,
device=device
)
self.model = model
def train(self, mode: bool = True) -> None:
self.model.train(mode=mode)
def eval(self) -> None:
self.train(mode=False)
def query(self, inputs: torch.Tensor) -> torch.Tensor:
return self.rmodel(inputs)
def __call__(self, inputs: torch.Tensor) -> torch.Tensor:
return self.query(inputs)
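# A hypothetical attacker sketch (not part of this file): AdversaryForTrain/ForValid
# invoke `attacker(model, inputs, targets)`, so any callable with that signature fits.
# This single-step FGSM with an assumed epsilon of 8/255 is for illustration only.
def _fgsm_attacker_example(model, inputs, targets, epsilon=8 / 255):
    inputs = inputs.clone().detach().requires_grad_(True)
    loss = nn.functional.cross_entropy(model(inputs), targets)
    grad, = torch.autograd.grad(loss, inputs)
    return (inputs + epsilon * grad.sign()).clamp(0., 1.).detach()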
| 33.087649
| 97
| 0.615653
|
b17c36265cd2fe08df7021090d94b14b34aa97f9
| 5,254
|
py
|
Python
|
torch_geometric/nn/conv/gmm_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | 1
|
2020-11-23T14:29:11.000Z
|
2020-11-23T14:29:11.000Z
|
torch_geometric/nn/conv/gmm_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | null | null | null |
torch_geometric/nn/conv/gmm_conv.py
|
shrey-bansal/pytorch_geometric
|
17108a08066b0a73530544d01719b186f2625ef2
|
[
"MIT"
] | null | null | null |
import torch
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from ..inits import zeros, glorot
class GMMConv(MessagePassing):
r"""The gaussian mixture model convolutional operator from the `"Geometric
Deep Learning on Graphs and Manifolds using Mixture Model CNNs"
<https://arxiv.org/abs/1611.08402>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \frac{1}{|\mathcal{N}(i)|}
\sum_{j \in \mathcal{N}(i)} \frac{1}{K} \sum_{k=1}^K
\mathbf{w}_k(\mathbf{e}_{i,j}) \odot \mathbf{\Theta}_k \mathbf{x}_j,
where
.. math::
\mathbf{w}_k(\mathbf{e}) = \exp \left( -\frac{1}{2} {\left(
\mathbf{e} - \mathbf{\mu}_k \right)}^{\top} \Sigma_k^{-1}
\left( \mathbf{e} - \mathbf{\mu}_k \right) \right)
denotes a weighting function based on trainable mean vector
:math:`\mathbf{\mu}_k` and diagonal covariance matrix
:math:`\mathbf{\Sigma}_k`.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
dim (int): Pseudo-coordinate dimensionality.
kernel_size (int): Number of kernels :math:`K`.
separate_gaussians (bool, optional): If set to :obj:`True`, will
learn separate GMMs for every pair of input and output channel,
inspired by traditional CNNs. (default: :obj:`False`)
aggr (string, optional): The aggregation operator to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"mean"`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add transformed root node features to the output.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels, out_channels, dim, kernel_size,
separate_gaussians=False, aggr='mean', root_weight=True,
bias=True, **kwargs):
super(GMMConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.dim = dim
self.kernel_size = kernel_size
self.separate_gaussians = separate_gaussians
self.g = Parameter(
torch.Tensor(in_channels, out_channels * kernel_size))
if not self.separate_gaussians:
self.mu = Parameter(torch.Tensor(kernel_size, dim))
self.sigma = Parameter(torch.Tensor(kernel_size, dim))
else:
self.mu = Parameter(
torch.Tensor(in_channels, out_channels, kernel_size, dim))
self.sigma = Parameter(
torch.Tensor(in_channels, out_channels, kernel_size, dim))
if root_weight:
self.root = Parameter(torch.Tensor(in_channels, out_channels))
else:
self.register_parameter('root', None)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.g)
glorot(self.mu)
glorot(self.sigma)
glorot(self.root)
zeros(self.bias)
def forward(self, x, edge_index, pseudo):
""""""
x = x.unsqueeze(-1) if x.dim() == 1 else x
pseudo = pseudo.unsqueeze(-1) if pseudo.dim() == 1 else pseudo
N, K, M = x.size(0), self.kernel_size, self.out_channels
if not self.separate_gaussians:
out = torch.matmul(x, self.g).view(N, K, M)
out = self.propagate(edge_index, x=out, pseudo=pseudo)
else:
out = self.propagate(edge_index, x=x, pseudo=pseudo)
if self.root is not None:
out = out + torch.matmul(x, self.root)
if self.bias is not None:
out = out + self.bias
return out
def message(self, x_j, pseudo):
EPS = 1e-15
F, M = self.in_channels, self.out_channels
(E, D), K = pseudo.size(), self.kernel_size
if not self.separate_gaussians:
gaussian = -0.5 * (pseudo.view(E, 1, D) -
self.mu.view(1, K, D)).pow(2)
gaussian = gaussian / (EPS + self.sigma.view(1, K, D).pow(2))
gaussian = torch.exp(gaussian.sum(dim=-1)) # [E, K]
return (x_j.view(E, K, M) * gaussian.view(E, K, 1)).sum(dim=-2)
else:
gaussian = -0.5 * (pseudo.view(E, 1, 1, 1, D) -
self.mu.view(1, F, M, K, D)).pow(2)
gaussian = gaussian / (EPS + self.sigma.view(1, F, M, K, D).pow(2))
gaussian = torch.exp(gaussian.sum(dim=-1)) # [E, F, M, K]
gaussian = gaussian * self.g.view(1, F, M, K)
gaussian = gaussian.sum(dim=-1) # [E, F, M]
return (x_j.view(E, F, 1) * gaussian).sum(dim=-2) # [E, M]
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
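# A minimal, hypothetical usage sketch (not part of the original file): a 4-node toy
# graph with 2-dimensional pseudo-coordinates pushed through GMMConv once; the tensor
# shapes follow the forward()/message() signatures above and all values are illustrative.
def _gmm_conv_example():
    conv = GMMConv(in_channels=16, out_channels=32, dim=2, kernel_size=3)
    x = torch.randn(4, 16)                      # 4 nodes, 16 features each
    edge_index = torch.tensor([[0, 1, 2, 3],    # source nodes
                               [1, 0, 3, 2]])   # target nodes
    pseudo = torch.rand(edge_index.size(1), 2)  # one 2-dim pseudo-coordinate per edge
    return conv(x, edge_index, pseudo)          # -> tensor of shape [4, 32]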
| 38.072464
| 79
| 0.57461
|
66380fd391d9d62a64060be4225f406eaca6e5fd
| 501
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/generators5.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/generators5.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
packages/pyright-internal/src/tests/samples/generators5.py
|
lipovsek/pytea
|
c536515a5e5947fac8871784323ba7eddc58956d
|
[
"MIT"
] | null | null | null |
# This sample tests various type checking operations relating to
# async generator functions where the return type is declared.
from typing import AsyncIterable, AsyncIterator
async def g1_explicit() -> AsyncIterator[int]:
yield 1
yield 2
async def g2_explicit():
async for v in g1_explicit():
yield v
async def g3_explicit() -> AsyncIterable[int]:
yield 1
yield 2
async def g4_explicit():
async for v in g3_explicit():
yield v
| 20.04
| 65
| 0.668663
|
c4a12bd449c153af1a4891a952d953cf0bc1ca66
| 531
|
py
|
Python
|
packages/python/plotly/plotly/validators/heatmap/colorbar/_exponentformat.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/heatmap/colorbar/_exponentformat.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/heatmap/colorbar/_exponentformat.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class ExponentformatValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="exponentformat", parent_name="heatmap.colorbar", **kwargs
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["none", "e", "E", "power", "SI", "B"]),
**kwargs,
)
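# Hedged illustration (not in the upstream file): this assumes the
# validate_coerce() helper inherited from the plotly base validators, which
# accepts one of the allowed strings and rejects anything else.
validator = ExponentformatValidator()
print(validator.validate_coerce("SI"))   # "SI" is in the allowed value set
# validator.validate_coerce("bogus")     # would raise a ValueError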
| 35.4
| 84
| 0.640301
|
3874ca2d0bcf303f0911b6fffc2c91174ab19f06
| 291
|
py
|
Python
|
8day/pan05.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
8day/pan05.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
8day/pan05.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
#Pan05
import chardet
def find_encoding(file):
    # Read the raw bytes and let chardet guess the encoding; the context
    # manager makes sure the file handle is closed.
    with open(file, 'rb') as f:
        raw = f.read()
    res = chardet.detect(raw)
    return res['encoding']
import pandas as pd
file1 = './data/01. CCTV_in_Seoul.csv'
df1 = pd.read_csv(file1, encoding= find_encoding(file1))
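# Illustrative extension (assumption: chardet's result dict also carries a
# 'confidence' score), useful for deciding whether to trust the detection.
with open(file1, 'rb') as f:
    detection = chardet.detect(f.read())
print(detection['encoding'], detection['confidence'])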
| 20.785714
| 56
| 0.676976
|
75a28812f23ef9578f19bb919b6158135858ce77
| 1,346
|
py
|
Python
|
girvi/filters.py
|
rajeshr188/django-onex
|
f1086a4159b1d135e54327c77c93fcc6c446338f
|
[
"MIT"
] | 2
|
2019-06-08T22:50:59.000Z
|
2020-07-12T14:13:18.000Z
|
girvi/filters.py
|
rajeshr188/django-onex
|
f1086a4159b1d135e54327c77c93fcc6c446338f
|
[
"MIT"
] | 13
|
2020-02-11T23:51:43.000Z
|
2021-06-05T13:10:49.000Z
|
girvi/filters.py
|
rajeshr188/django-onex
|
f1086a4159b1d135e54327c77c93fcc6c446338f
|
[
"MIT"
] | null | null | null |
from .models import Loan, LoanStatement,Release,Adjustment
from contact.models import Customer
import django_filters
from django_select2.forms import Select2Widget
class LoanFilter(django_filters.FilterSet):
    loanid = django_filters.CharFilter(lookup_expr='icontains')
    itemdesc = django_filters.CharFilter(lookup_expr='icontains')
    customer = django_filters.ModelChoiceFilter(
        queryset=Customer.objects.filter(type='Re', active=True),
        widget=Select2Widget)
    Status = django_filters.BooleanFilter(field_name='release', method='filter_status')
    def filter_status(self, queryset, name, value):
        return queryset.filter(release__isnull=value)
    class Meta:
        model = Loan
        fields = ['loanid', 'series', 'customer', 'itemtype', 'itemweight', 'itemdesc', 'loanamount']
class LoanStatementFilter(django_filters.FilterSet):
loan = django_filters.ModelChoiceFilter(
widget = Select2Widget,
queryset = Loan.objects.filter(series__is_active = True)
)
class Meta:
model = LoanStatement
fields = ['loan']
class AdjustmentFilter(django_filters.FilterSet):
class Meta:
model = Adjustment
fields = ['loan']
class ReleaseFilter(django_filters.FilterSet):
class Meta:
model = Release
fields = ['releaseid','loan']
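# Hedged usage sketch (not part of the original module): a FilterSet is
# typically bound to request.GET in a view and exposes the filtered queryset
# via .qs; `request` here is a hypothetical HttpRequest.
def loan_queryset(request):
    loan_filter = LoanFilter(request.GET, queryset=Loan.objects.all())
    return loan_filter.qs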
| 35.421053
| 93
| 0.70951
|
f39e96656d5c1472b3ffaa5714389a87faff49b2
| 3,675
|
py
|
Python
|
src/sentry/models/groupmeta.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/groupmeta.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/models/groupmeta.py
|
ChadKillingsworth/sentry
|
ffcb9007a95a83ee267935fe605f8ee8f03a85a5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
sentry.models.groupmeta
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import threading
from celery.signals import task_postrun
from django.core.signals import request_finished
from django.db import models
from sentry.exceptions import CacheNotPopulated
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
from sentry.db.models.manager import BaseManager
ERR_CACHE_MISSING = 'Cache not populated for instance id=%s'
class GroupMetaManager(BaseManager):
def __init__(self, *args, **kwargs):
super(GroupMetaManager, self).__init__(*args, **kwargs)
self.__local_cache = threading.local()
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_GroupMetaManager__local_cache', None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__local_cache = threading.local()
def _get_cache(self):
if not hasattr(self.__local_cache, 'value'):
self.__local_cache.value = {}
return self.__local_cache.value
def _set_cache(self, value):
self.__local_cache.value = value
__cache = property(_get_cache, _set_cache)
def contribute_to_class(self, model, name):
model.CacheNotPopulated = CacheNotPopulated
super(GroupMetaManager, self).contribute_to_class(model, name)
task_postrun.connect(self.clear_local_cache)
request_finished.connect(self.clear_local_cache)
def clear_local_cache(self, **kwargs):
self.__cache = {}
def populate_cache(self, instance_list):
for group in instance_list:
self.__cache.setdefault(group.id, {})
results = self.filter(
group__in=instance_list,
).values_list('group', 'key', 'value')
for group_id, key, value in results:
self.__cache[group_id][key] = value
def get_value_bulk(self, instance_list, key, default=None):
results = {}
for instance in instance_list:
try:
inst_cache = self.__cache[instance.id]
except KeyError:
                raise self.model.CacheNotPopulated(ERR_CACHE_MISSING % (instance.id,))
results[instance] = inst_cache.get(key, default)
return results
def get_value(self, instance, key, default=None):
try:
inst_cache = self.__cache[instance.id]
except KeyError:
            raise self.model.CacheNotPopulated(ERR_CACHE_MISSING % (instance.id,))
return inst_cache.get(key, default)
def unset_value(self, instance, key):
self.filter(group=instance, key=key).delete()
try:
del self.__cache[instance.id][key]
except KeyError:
pass
def set_value(self, instance, key, value):
self.create_or_update(
group=instance,
key=key,
defaults={
'value': value,
},
)
self.__cache.setdefault(instance.id, {})
self.__cache[instance.id][key] = value
class GroupMeta(Model):
"""
Arbitrary key/value store for Groups.
Generally useful for things like storing metadata
provided by plugins.
"""
group = FlexibleForeignKey('sentry.Group')
key = models.CharField(max_length=64)
value = models.TextField()
objects = GroupMetaManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupmeta'
unique_together = (('group', 'key'),)
__repr__ = sane_repr('group_id', 'key', 'value')
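# Hedged usage sketch (not in the upstream file): the manager above expects the
# per-thread cache to be populated for a batch of groups before any reads;
# `group_list` is a hypothetical list of Group instances.
GroupMeta.objects.populate_cache(group_list)
title = GroupMeta.objects.get_value(group_list[0], 'title', default=None)
titles = GroupMeta.objects.get_value_bulk(group_list, 'title')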
| 30.122951
| 86
| 0.646531
|
23418c7b5788e28db416fa1622a539028b61882b
| 6,401
|
py
|
Python
|
pystash/web.py
|
ashishchopra/pystash
|
6fad5e7ea4684eb4cf749028be2162c047db1f30
|
[
"MIT"
] | 6
|
2015-02-23T19:41:42.000Z
|
2015-12-24T10:25:54.000Z
|
pystash/web.py
|
ashishchopra/pystash
|
6fad5e7ea4684eb4cf749028be2162c047db1f30
|
[
"MIT"
] | null | null | null |
pystash/web.py
|
ashishchopra/pystash
|
6fad5e7ea4684eb4cf749028be2162c047db1f30
|
[
"MIT"
] | 2
|
2017-06-01T04:40:08.000Z
|
2021-08-30T20:15:04.000Z
|
# -*- coding: utf-8 -*-
from requests.auth import AuthBase
import requests
import json
import hashlib
import sys
import os
import getpass
from clint.textui import colored
from common import output
import netrc
STASH_HOST = 'http://getstash.herokuapp.com'
if 'STASH_HOST' in os.environ:
STASH_HOST = os.environ['STASH_HOST']
class DuplicateKeyword(Exception):
"""
Key already exist
"""
pass
class WrongArgumentsSet(Exception):
"""
Not enough arguments
"""
pass
class WrongKey(Exception):
"""
Key not found
"""
pass
class NoInternetConnection(Exception):
"""
No Internet connection or server not available
"""
pass
class ServerError(Exception):
"""
Server error
"""
pass
class UnknownServerError(Exception):
"""
Unknown server error
"""
pass
class WrongCredentials(Exception):
pass
class TokenAuth(AuthBase):
"""Attaches HTTP Token Authentication to the given Request object."""
def __init__(self, username, password):
# setup any auth-related data here
self.username = username
self.password = password
def __call__(self, r):
# modify and return the request
r.headers['X-Token'] = self.password
return r
class AlreadyLoggedIn(Exception):
pass
class API(object):
username = None
token = None
def check_login(self):
"""
Check if user logged in. If True - return login and token, else returns None
"""
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc')
if not os.path.exists(netrc_path):
open(netrc_path, 'w').close()
info = netrc.netrc()
login, account, password = info.authenticators(STASH_HOST) or (None, None, None)
if password and login:
if self.username is None or self.token is None:
self.username = login
# todo: why token is equal to password?
self.token = password
return login, password
return None
def login_decorator(fn):
def wrapper(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], API):
if args[0].check_login() is not None:
return fn(*args, **kwargs)
            raise Exception('Unknown credentials.\nTry running `stash login` first.\n')
            #output('Unknown credentials.\nTry running `stash login` first.\n', color='yellow')
return wrapper
def send_request_decorator(fn):
"""
Request decorator (avoiding code duplication)
"""
def wrapper(self, *args):
data = fn(self, *args)
data.update(self.get_user_data())
url = STASH_HOST + '/api/json'
try:
data['token'] = self.token
headers = {'Stash-Token': self.token}
r = requests.post(url, data=json.dumps(data), headers=headers)
except requests.exceptions.ConnectionError:
raise NoInternetConnection
# todo: replace with regular python exceptions
if r.status_code == 404:
raise WrongKey
if r.status_code == 401:
raise WrongCredentials
if r.status_code == 500:
raise ServerError
if r.status_code == 200:
return r.json()
else:
return UnknownServerError
return wrapper
def get_user_data(self):
return {'user': self.username}
def login(self, login, password):
if self.check_login() is not None:
raise AlreadyLoggedIn
m = hashlib.new('md5')
m.update(password)
r = self.get_token(login, password)
#TODO check if r is an error (remove / from stash host for example)
if 'token' in r:
# todo: maybe we don't need this two lines?
self.username = login
self.token = r['token']
with open(os.path.join(os.environ['HOME'], ".netrc"), "a") as f:
f.write("machine " + STASH_HOST + " login " + login + " password " + str(r['token']) + "\n")
f.close()
else:
# todo: do something
pass
if 'error' in r:
raise Exception(r['error'])
return True
def logout(self):
"""
Clear .netrc record
"""
netrc_path = os.path.join(os.path.expanduser('~'), '.netrc')
if not os.path.exists(netrc_path):
open(netrc_path, 'w').close()
info = netrc.netrc()
if STASH_HOST in info.hosts:
del info.hosts[STASH_HOST]
else:
raise Exception('You haven\'t logged in yet')
with open(netrc_path, 'w') as f:
f.write(info.__repr__())
f.close()
return True
# ==========
@send_request_decorator
@login_decorator
def get(self, key):
return {'get': key}
@send_request_decorator
@login_decorator
def search(self, key):
return {'search': key}
@send_request_decorator
@login_decorator
def set(self, key, value, tags, overwrite=False,append=False):
return {'set': { key: value }, 'tags' : tags, 'overwrite': overwrite, 'append' : append}
@send_request_decorator
@login_decorator
def delete(self, key):
return {'delete': key}
@send_request_decorator
@login_decorator
def all(self):
return {'getkeys': True}
@send_request_decorator
@login_decorator
def gettags(self):
return {'gettags': True}
@send_request_decorator
@login_decorator
def tags(self, key):
return {'tags': key }
@send_request_decorator
@login_decorator
def push(self, list_title, value):
return {'push': {list_title: value}}
@send_request_decorator
def get_token(self, username, password):
return {'login': {username: password}}
# =========
@login_decorator
@send_request_decorator
def sync(self, local_db_data):
return { 'sync' : local_db_data }
def push(self):
"""Push data to cloud"""
def pull(self):
"""Pull data from cloud"""
| 25.91498
| 108
| 0.578816
|
a2641655841c9b41f0882b5bd354b408ef903a9a
| 5,833
|
py
|
Python
|
from_3b1b/on_hold/holomorphic.py
|
WRangers/manim
|
38acb22c7e5c1a95afdf05678e07ad506513d676
|
[
"MIT"
] | 1
|
2021-09-20T14:16:46.000Z
|
2021-09-20T14:16:46.000Z
|
source/manimlib/from_3b1b/on_hold/holomorphic.py
|
WRangers/manim_doc
|
22e0936ab3b409358fa71b335897aa99aa46b5a0
|
[
"MIT"
] | null | null | null |
source/manimlib/from_3b1b/on_hold/holomorphic.py
|
WRangers/manim_doc
|
22e0936ab3b409358fa71b335897aa99aa46b5a0
|
[
"MIT"
] | 1
|
2021-08-23T04:14:19.000Z
|
2021-08-23T04:14:19.000Z
|
from manimlib.imports import *
class ComplexAnalysisOverlay(Scene):
def construct(self):
words = TextMobject("Complex analysis")
words.scale(1.25)
words.to_edge(UP)
words.add_background_rectangle()
self.add(words)
self.wait()
class AnalyzeZSquared(ComplexTransformationScene, ZoomedScene):
CONFIG = {
"plane_config": {
"line_frequency": 0.1,
},
"num_anchors_to_add_per_line": 20,
"complex_homotopy": lambda z, t: z**(1.0 + t),
"zoom_factor": 0.05,
}
def setup(self):
ComplexTransformationScene.setup(self)
ZoomedScene.setup(self)
def construct(self):
set_gpus([0,1])
self.edit_background_plane()
self.add_title()
# self.add_transforming_planes()
# self.preview_some_numbers()
self.zoom_in_to_one_plus_half_i()
self.write_derivative()
def add_title(self):
title = TexMobject("z \\rightarrow z^2")
title.add_background_rectangle()
title.scale(1.5)
title.to_corner(UL, buff=MED_SMALL_BUFF)
self.add_foreground_mobject(title)
def edit_background_plane(self):
self.backgrounds.set_stroke(GREY, 2)
self.background.secondary_lines.set_stroke(DARK_GREY, 1)
self.add_foreground_mobject(self.background.coordinate_labels)
def add_transforming_planes(self):
self.plane = self.get_plane()
self.add_transformable_mobjects(self.plane)
def preview_some_numbers(self):
dots = VGroup(*[
Dot().move_to(self.background.number_to_point(z))
for z in [
1, 2, complex(0, 1),
-1, complex(2, 0.5), complex(-1, -1), complex(3, 0.5),
]
])
dots.set_color_by_gradient(RED, YELLOW)
d_angle = 30 * DEGREES
dot_groups = VGroup()
for dot in dots:
point = dot.get_center()
z = self.background.point_to_number(point)
z_out = self.complex_homotopy(z, 1)
out_point = self.background.number_to_point(z_out)
path_arc = angle_of_vector(point)
if abs(z - 1) < 0.01:
# One is special
arrow = Arc(
start_angle=(-90 * DEGREES + d_angle),
angle=(360 * DEGREES - 2 * d_angle),
radius=0.25
)
arrow.add_tip(tip_length=0.15)
arrow.pointwise_become_partial(arrow, 0, 0.9)
arrow.next_to(dot, UP, buff=0)
else:
arrow = Arrow(
point, out_point,
path_arc=path_arc,
buff=SMALL_BUFF,
)
arrow.match_color(dot)
out_dot = dot.copy()
# out_dot.set_fill(opacity=0.5)
out_dot.set_stroke(BLUE, 1)
out_dot.move_to(out_point)
dot.path_arc = path_arc
dot.out_dot = out_dot
dot_group = VGroup(dot, arrow, out_dot)
dot_groups.add(dot_group)
dot_copy = dot.copy()
dot.save_state()
dot.scale(3)
dot.fade(1)
dot_group.anim = Succession(
ApplyMethod(dot.restore),
AnimationGroup(
ShowCreation(arrow),
ReplacementTransform(
dot_copy, out_dot,
path_arc=path_arc
)
)
)
for dot_group in dot_groups[:3]:
self.play(dot_group.anim)
self.wait()
self.play(*[dg.anim for dg in dot_groups[3:]])
self.apply_complex_homotopy(
self.complex_homotopy,
added_anims=[Animation(dot_groups)]
)
self.wait()
self.play(FadeOut(dot_groups))
self.wait()
self.play(FadeOut(self.plane))
self.transformable_mobjects.remove(self.plane)
def zoom_in_to_one_plus_half_i(self):
z = complex(1, 0.5)
point = self.background.number_to_point(z)
point_mob = VectorizedPoint(point)
frame = self.zoomed_camera.frame
frame.move_to(point)
tiny_plane = NumberPlane(
x_radius=2, y_radius=2,
color=GREEN,
secondary_color=GREEN_E
)
tiny_plane.replace(frame)
plane = self.get_plane()
words = TextMobject("What does this look like")
words.add_background_rectangle()
words.next_to(self.zoomed_display, LEFT, aligned_edge=UP)
arrow = Arrow(words.get_bottom(), self.zoomed_display.get_left())
VGroup(words, arrow).set_color(YELLOW)
self.play(FadeIn(plane))
self.activate_zooming(animate=True)
self.play(ShowCreation(tiny_plane))
self.wait()
self.add_transformable_mobjects(plane, tiny_plane, point_mob)
self.add_foreground_mobjects(words, arrow)
self.apply_complex_homotopy(
self.complex_homotopy,
added_anims=[
Write(words),
GrowArrow(arrow),
MaintainPositionRelativeTo(frame, point_mob)
]
)
self.wait(2)
def write_derivative(self):
pass
# Helpers
def get_plane(self):
top_plane = NumberPlane(
y_radius=FRAME_HEIGHT / 2,
x_line_frequency=0.1,
y_line_frequency=0.1,
)
self.prepare_for_transformation(top_plane)
bottom_plane = top_plane.copy()
tiny_tiny_buff = 0.001
top_plane.next_to(ORIGIN, UP, buff=tiny_tiny_buff)
bottom_plane.next_to(ORIGIN, DOWN, buff=tiny_tiny_buff)
return VGroup(top_plane, bottom_plane)
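# Standalone sketch (no manim required) of the homotopy used above,
# z -> z**(1 + t), sampled at a few interpolation times for z = 1 + 0.5i.
z = complex(1, 0.5)
for t in (0.0, 0.5, 1.0):
    w = z ** (1.0 + t)
    print("t=%.1f -> %.3f%+.3fj" % (t, w.real, w.imag))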
| 31.701087
| 73
| 0.564032
|
59fb44dc6cef37153964603c83f25d1ee81c7672
| 2,698
|
py
|
Python
|
plugins/lookup/vars_dict_start_with.py
|
franzs/ansible-plugins
|
1babfc9f0876b3dc54cc680931543bc2217e6ed4
|
[
"MIT"
] | 2
|
2015-11-03T14:28:57.000Z
|
2018-03-31T18:41:04.000Z
|
plugins/lookup/vars_dict_start_with.py
|
franzs/ansible-plugins
|
1babfc9f0876b3dc54cc680931543bc2217e6ed4
|
[
"MIT"
] | 3
|
2017-09-20T20:32:29.000Z
|
2020-01-22T14:51:18.000Z
|
plugins/lookup/vars_dict_start_with.py
|
franzs/ansible-plugins
|
1babfc9f0876b3dc54cc680931543bc2217e6ed4
|
[
"MIT"
] | 1
|
2018-06-27T07:35:07.000Z
|
2018-06-27T07:35:07.000Z
|
# (c) 2018 David Lundgren
# MIT
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
    lookup: vars_dict_start_with
author: David Lundgren
version_added: "2.5"
short_description: Lookup templated value of variables that start with a prefix
description:
- Retrieves the value of an Ansible variable.
options:
_terms:
description: The variable names to look up.
required: True
default:
description:
- What to return if a variable is undefined.
- If no default is set, it will result in an error if any of the variables is undefined.
"""
EXAMPLES = """
- name: find several related variables
debug: msg="{{ lookup('vars_dict_start_with', 'ansible_play') }}"
- name: find several related variables excluding others
debug: msg="{{ lookup('vars_dict_start_with', 'ansible_play', '!ansible_play_never') }}"
- name: alternate way to find some 'prefixed vars' in loop
debug: msg="{{ lookup('vars', 'ansible_play_' + item) }}"
loop:
- hosts
- batch
- hosts_all
"""
RETURN = """
_value:
description:
- value of the variables requested.
"""
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def is_excluded(self, excludes, key):
if key in excludes:
return True
for term in excludes:
if key.startswith(term):
return True
return False
def run(self, terms, variables=None, **kwargs):
if variables is not None:
self._templar.set_available_variables(variables)
myvars = getattr(self._templar, '_available_variables', {})
self.set_options(direct=kwargs)
default = self.get_option('default')
        # Split the '!'-prefixed exclude terms out first; popping from the list
        # while enumerating it would skip the following entry.
        excludes = [term[1:] for term in terms if term.startswith('!')]
        terms = [term for term in terms if not term.startswith('!')]
ret = {}
for term in terms:
if not isinstance(term, string_types):
                raise AnsibleError('Invalid setting identifier, "%s" is not a string, it\'s a %s' % (term, type(term)))
try:
for key in myvars:
if key.startswith(term) and not self.is_excluded(excludes, key):
ret.update(self._templar.template(myvars[key], fail_on_undefined=True))
for key in myvars['hostvars'][myvars['inventory_hostname']]:
if key.startswith(term) and not self.is_excluded(excludes, key):
ret.update(self._templar.template(myvars['hostvars'][myvars['inventory_hostname']][key], fail_on_undefined=True))
except AnsibleUndefinedVariable:
if default is not None:
ret.update(default)
else:
raise
return ret
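# Standalone sketch (plain Python, no Ansible required) of the prefix
# include/exclude rule applied by run() above; the variable names are made up.
sample_vars = {
    'ansible_play_hosts': ['web1'],
    'ansible_play_batch': ['web1'],
    'ansible_play_never_used': True,
}
terms, excludes = ['ansible_play'], ['ansible_play_never']
selected = {
    key: value for key, value in sample_vars.items()
    if any(key.startswith(t) for t in terms)
    and not any(key.startswith(e) for e in excludes)
}
print(selected)  # {'ansible_play_hosts': ['web1'], 'ansible_play_batch': ['web1']}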
| 29.010753
| 119
| 0.699036
|
d6f1a867c1e8f0150a4954ddeb454cb8768dd876
| 1,057
|
py
|
Python
|
datastructure/practice/c2/c_2_27.py
|
stoneyangxu/python-kata
|
979af91c74718a525dcd2a83fe53ec6342af9741
|
[
"MIT"
] | null | null | null |
datastructure/practice/c2/c_2_27.py
|
stoneyangxu/python-kata
|
979af91c74718a525dcd2a83fe53ec6342af9741
|
[
"MIT"
] | null | null | null |
datastructure/practice/c2/c_2_27.py
|
stoneyangxu/python-kata
|
979af91c74718a525dcd2a83fe53ec6342af9741
|
[
"MIT"
] | null | null | null |
import unittest
import time
class Range:
def __init__(self, start, stop=None, step=1):
if step == 0:
raise ValueError("step cannot be 0")
if stop is None:
start, stop = 0, start
self._length = max(0, (stop - start + step - 1) // step)
self._start = start
self._step = step
def __len__(self):
return self._length
def __getitem__(self, i):
if i < 0:
i += len(self)
if not 0 <= i < self._length:
raise IndexError("index out of range")
return self._start + i * self._step
    def __contains__(self, val):
        # O(1) membership test: the value must be step-aligned and fall inside the range.
        offset = val - self._start
        return offset % self._step == 0 and 0 <= offset // self._step < self._length
class MyTestCase(unittest.TestCase):
def test_something(self):
start = time.time()
print(2 in Range(100000000))
time1 = time.time()
print(9999999 in Range(100000000))
time2 = time.time()
print(time2 - time1, time1 - start, (time2 - time1) // (time1 - start))
if __name__ == '__main__':
unittest.main()
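# Quick illustration (not part of the exercise file) of the O(1) sequence
# protocol implemented by Range above.
r = Range(1, 10, 2)        # conceptually 1, 3, 5, 7, 9
print(len(r))              # 5
print(r[0], r[-1])         # 1 9
print(5 in r, 4 in r)      # True False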
| 23.488889
| 79
| 0.561022
|
ce4fc51f61fbd73810eeb56a5985ea09981fb25b
| 14,597
|
py
|
Python
|
src/gui/main_gui_controller.py
|
kamilcieslik/test_house_price_app
|
f7c5786d0e79e23bafaedd24088aa506c04b5527
|
[
"MIT"
] | 1
|
2019-02-15T03:42:43.000Z
|
2019-02-15T03:42:43.000Z
|
src/gui/main_gui_controller.py
|
kamilcieslik/test_house_price_app
|
f7c5786d0e79e23bafaedd24088aa506c04b5527
|
[
"MIT"
] | 1
|
2021-06-01T22:12:11.000Z
|
2021-06-01T22:12:11.000Z
|
src/gui/main_gui_controller.py
|
kamilcieslik/test_house_price_app
|
f7c5786d0e79e23bafaedd24088aa506c04b5527
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5 import QtWidgets
from src.gui.main_gui import GuiMainWindow
from src.gui.widgets_data_validation import DataValidation
from calculator.prices_calculator import PricesCalculator
from calculator import exception
from googlemaps.exceptions import TransportError
import os
from pathlib import Path
from ctypes import cdll
import ctypes
dll_path = str(Path(os.getcwd()).parent) + "\\native_c\\Converter.dll"
if os.path.exists(dll_path):
libc = cdll.LoadLibrary(dll_path)
class MainGuiController(object):
def __init__(self, google_api_key):
self._prices_calculator = PricesCalculator(google_api_key)
self._data_validation = DataValidation
        # Initialize the GUI components.
self._ui = GuiMainWindow()
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
self._ui.setup_ui(MainWindow)
        # Initialize the GUI component listeners.
self.init_listeners()
        # Initialize input data validation.
self.init_data_validation()
        # Fill the combo boxes with data.
self.fill_combo_boxes_with_data()
        # Reset the component values (overwriting the example ones).
self.reset_components_values()
        # Display the GUI.
MainWindow.show()
sys.exit(app.exec_())
def init_data_validation(self):
self \
._ui.text_edit_construction_year \
.textChanged \
.connect(lambda: self
._data_validation
.text_edit_validation(self._ui
.text_edit_construction_year,
self._ui.
label_construction_year_message,
"Podaj rok budowy.",
"^[1-9]{1}[0-9]{3}$",
"Niepoprawny format."))
self \
._ui.text_edit_meters \
.textChanged.connect(lambda: self
._data_validation
.text_edit_validation(self._ui
.text_edit_meters,
self._ui.
label_meters_message,
"Podaj il. metrów kw.",
"^[1-9]{1}[0-9]{1,2}$",
"Niepoprawny format."))
self \
._ui.combo_box_address. \
currentTextChanged \
.connect(lambda: self._data_validation
.text_edit_validation(self._ui
.combo_box_address,
self._ui.label_address_message,
"Wybierz adres."))
self \
._ui.combo_box_market_type \
.currentTextChanged \
.connect(lambda: self._data_validation
.text_edit_validation(self._ui
.combo_box_market_type,
self._ui.label_market_type_message,
"Wybierz rynek."))
self._ui.combo_box_building_type \
.currentTextChanged \
.connect(lambda: self._data_validation
.text_edit_validation(self._ui
.combo_box_building_type,
self._ui
.label_building_type_message,
"Wybierz rodzaj zabudowy."))
self._ui.combo_box_building_material \
.currentTextChanged \
.connect(lambda: self._data_validation
.text_edit_validation(self._ui
.combo_box_building_material,
self._ui
.label_building_material_message,
"Wybierz materiał budynku."))
def init_listeners(self):
self._ui.push_button_calculate.clicked.connect(
self.push_button_calculate_on_click)
self._ui.push_button_reset.clicked.connect(
self.push_button_reset_on_click)
self._ui.push_button_search_address.clicked.connect(
self.push_button_search_address_on_click)
self._ui.combo_box_address.currentIndexChanged.connect(
self.combo_box_address_on_index_changed)
def fill_combo_boxes_with_data(self):
self._ui.combo_box_building_type.addItem("")
self._ui.combo_box_building_type.addItems(
self._prices_calculator.building_types)
self._ui.combo_box_market_type.addItem("")
self._ui.combo_box_market_type.addItems(
self._prices_calculator.market_types)
self._ui.combo_box_building_material.addItem("")
self._ui.combo_box_building_material.addItems(
self._prices_calculator.building_materials)
def reset_components_values(self):
self._ui.text_edit_search_address.setText("")
if self._prices_calculator.autocomplete_addresses:
self._prices_calculator.autocomplete_addresses = []
self._ui.combo_box_address.setCurrentIndex(0)
self._ui.combo_box_market_type.setCurrentIndex(0)
self._ui.text_edit_construction_year.setText("")
self._ui.text_edit_meters.setText("")
self._ui.combo_box_building_type.setCurrentIndex(0)
self._ui.combo_box_building_material.setCurrentIndex(0)
self._ui.check_box_balcony.setChecked(False)
self._ui.chech_box_cellar.setChecked(False)
self._ui.check_box_garden.setChecked(False)
self._ui.check_box_terrace.setChecked(False)
self._ui.check_box_elevator.setChecked(False)
self._ui.check_box_separate_kitchen.setChecked(False)
self._ui.check_box_guarded_estate.setChecked(False)
self._ui.label_reference_city_data.setText("------")
self._ui.label_reference_city_price_per_meter_data.setText("------")
self._ui.label_distance_data.setText("------")
self._ui.label_basic_price_per_meter_data.setText("------")
self._ui.label_final_price_data.setText("------")
self._ui.label_final_price_per_meter_data.setText("------")
def push_button_calculate_on_click(self):
if self._ui.label_address_message.text() == "" and self \
._ui.label_building_type_message.text() == "" \
and self._ui.label_market_type_message.text() == "" \
and self._ui.label_construction_year_message.text() == "" \
and self._ui.label_meters_message.text() == "" \
and self._ui.label_building_material_message.text() == "":
try:
calculator_result = self._prices_calculator \
                    .calculate_house_price(self._ui.combo_box_building_type  # HERE
.currentText(),
self._ui.combo_box_market_type
.currentText(),
self._ui.combo_box_building_material
.currentText(),
self._ui.text_edit_construction_year
.toPlainText(),
self._ui.text_edit_meters
.toPlainText(),
self._ui.check_box_balcony
.isChecked(),
self._ui.chech_box_cellar
.isChecked(),
self._ui.check_box_garden
.isChecked(),
self._ui.check_box_terrace
.isChecked(),
self._ui.check_box_elevator
.isChecked(),
self._ui.check_box_separate_kitchen
.isChecked(),
self._ui.check_box_guarded_estate
.isChecked())
if self._ui.combo_box_market_type.currentText() == "pierwotny":
self._ui.label_reference_city_price_per_meter_data \
.setText(str(round(calculator_result
.nearest_reference_city
.price_per_meter_on_primary_market,
2)) + " zł")
else:
self._ui.label_reference_city_price_per_meter_data \
.setText(str(round(calculator_result
.nearest_reference_city
.price_per_meter_on_aftermarket, 2))
+ " zł")
string_upper = libc.string_toupper
string_upper.argtypes = [ctypes.c_char_p]
string_upper.restype = ctypes.c_char_p
self.set_reference_city_name\
(str(string_upper(str.encode(calculator_result.nearest_reference_city.name)), "utf-8"))
self._ui.label_distance_data.setText(
str(round(calculator_result
.distance_from_flat_to_nearest_reference_city /
1000,
2))
+ " km")
self._ui.label_basic_price_per_meter_data.setText(
str(round(calculator_result.basic_price_per_meter, 2))
+ " zł")
self._ui.label_final_price_data.setText(
str(round(calculator_result.house_price, 2)) + " zł")
self._ui.label_final_price_per_meter_data.setText(
str(round(calculator_result.final_price_per_meter, 2))
+ " zł")
self._ui.browser.page().runJavaScript("drawFlatMarker=true;")
self._ui.browser.page().runJavaScript(
"drawReferenceCity=true;")
self._ui.browser.page().runJavaScript(
"yourFlatLat=" + str(
self._prices_calculator.selected_address.latitude))
self._ui.browser.page().runJavaScript(
"yourFlatLng=" + str(
self._prices_calculator.selected_address.longitude))
self._ui.browser.page().runJavaScript(
"referenceCityLat=" + str(
calculator_result.nearest_reference_city.latitude))
self._ui.browser.page().runJavaScript(
"referenceCityLng=" + str(
calculator_result.nearest_reference_city.longitude))
self._ui.browser.page().runJavaScript("initMap();")
except (exception.ConstructionYearViolationException,
exception.FlatParameterMismatchException) as e:
msgBox = QtWidgets.QMessageBox(self._ui.widget_central)
msgBox.about(self._ui.widget_central, "Ostrzeżenie",
"Operacja oszacowania wartości nie powiedzie się."
"\n"
"Powód: {}.".format(str(e)))
else:
msgBox = QtWidgets.QMessageBox(self._ui.widget_central)
msgBox.about(self._ui.widget_central, "Ostrzeżenie",
"Operacja oszacowania wartości nie powiedzie się.\n"
"Powód: Nie wszystkie wartości mają poprawny format.")
def push_button_reset_on_click(self):
self.reset_components_values()
self._ui.browser.page().runJavaScript("drawFlatMarker=false;")
self._ui.browser.page().runJavaScript("drawReferenceCity=false;")
self._ui.browser.page().runJavaScript("initMap();")
def push_button_search_address_on_click(self):
try:
self._ui.combo_box_address.clear()
self._prices_calculator.autocomplete_addresses = \
self._ui.text_edit_search_address.toPlainText()
if self._prices_calculator.autocomplete_addresses:
self._ui.combo_box_address.addItem("")
for address in self._prices_calculator.autocomplete_addresses:
self._ui.combo_box_address.addItem(str(address))
self._ui.browser.page().runJavaScript("drawFlatMarker=false;")
self._ui.browser.page().runJavaScript(
"drawReferenceCity=false;")
self._ui.browser.page().runJavaScript("initMap();")
except TransportError as e:
msgBox = QtWidgets.QMessageBox(self._ui.widget_central)
msgBox.about(self._ui.widget_central, "Ostrzeżenie",
"Operacja typowania adresów nie powiodła się.\n"
"Powód: {}.".format(str(e)))
def combo_box_address_on_index_changed(self):
if self._ui.combo_box_address.currentText() != "":
self._prices_calculator.selected_address = \
self._ui.combo_box_address.currentIndex() - 1
self._ui.browser.page().runJavaScript("drawFlatMarker=true;")
self._ui.browser.page().runJavaScript("drawReferenceCity=false;")
self._ui.browser.page().runJavaScript(
"yourFlatLat=" + str(
self._prices_calculator.selected_address.latitude))
self._ui.browser.page().runJavaScript(
"yourFlatLng=" + str(
self._prices_calculator.selected_address.longitude))
self._ui.browser.page().runJavaScript("initMap();")
def set_reference_city_name(self, converted_reference_city_name):
self._ui.label_reference_city_data.setText(converted_reference_city_name)
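# Hedged standalone sketch of the ctypes pattern used above: declare the
# argument and return types before calling the exported C function. It assumes
# Converter.dll (and its string_toupper symbol) is available on this machine.
from ctypes import cdll, c_char_p
lib = cdll.LoadLibrary("Converter.dll")
string_toupper = lib.string_toupper
string_toupper.argtypes = [c_char_p]
string_toupper.restype = c_char_p
print(string_toupper("krakow".encode("utf-8")).decode("utf-8"))  # -> "KRAKOW"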
| 49.314189
| 107
| 0.536823
|
57e58f2eebb06c089b1fed3da60669000227a5e5
| 8,100
|
py
|
Python
|
tools/subunit-trace.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | 1
|
2021-06-12T14:54:52.000Z
|
2021-06-12T14:54:52.000Z
|
tools/subunit-trace.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | null | null | null |
tools/subunit-trace.py
|
queria/my-tempest
|
a9cdee0201bb956c7502fd372dab467b056ba67f
|
[
"Apache-2.0"
] | 1
|
2017-07-11T13:54:27.000Z
|
2017-07-11T13:54:27.000Z
|
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trace a subunit stream in reasonable detail and high accuracy."""
import argparse
import functools
import re
import sys
import subunit
import testtools
DAY_SECONDS = 60 * 60 * 24
FAILS = []
RESULTS = {}
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
By default we strip out the tags in the test because they don't help us
    in matching the test that was run to its result.
    Make it possible to strip out the testscenarios information (not to
    be confused with tempest scenarios); however, that's often needed to
    identify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
def get_duration(timestamps):
start, end = timestamps
if not start or not end:
duration = ''
else:
delta = end - start
duration = '%d.%06ds' % (
delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
return duration
def find_worker(test):
for tag in test['tags']:
if tag.startswith('worker-'):
return int(tag[7:])
return 'NaN'
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
"""Print out subunit attachments.
Print out subunit attachments that contain content. This
runs in 2 modes, one for successes where we print out just stdout
and stderr, and an override that dumps all the attachments.
"""
channels = ('stdout', 'stderr')
for name, detail in test['details'].items():
# NOTE(sdague): the subunit names are a little crazy, and actually
# are in the form pythonlogging:'' (with the colon and quotes)
name = name.split(':')[0]
if detail.content_type.type == 'test':
detail.content_type.type = 'text'
if (all_channels or name in channels) and detail.as_text():
title = "Captured %s:" % name
stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
# indent attachment lines 4 spaces to make them visually
# offset
for line in detail.as_text().split('\n'):
stream.write(" %s\n" % line)
def show_outcome(stream, test, print_failures=False):
global RESULTS
status = test['status']
# TODO(sdague): ask lifeless why on this?
if status == 'exists':
return
worker = find_worker(test)
name = cleanup_test_name(test['id'])
duration = get_duration(test['timestamps'])
if worker not in RESULTS:
RESULTS[worker] = []
RESULTS[worker].append(test)
# don't count the end of the return code as a fail
if name == 'process-returncode':
return
if status == 'success':
stream.write('{%s} %s [%s] ... ok\n' % (
worker, name, duration))
print_attachments(stream, test)
elif status == 'fail':
FAILS.append(test)
stream.write('{%s} %s [%s] ... FAILED\n' % (
worker, name, duration))
if not print_failures:
print_attachments(stream, test, all_channels=True)
elif status == 'skip':
stream.write('{%s} %s ... SKIPPED: %s\n' % (
worker, name, test['details']['reason'].as_text()))
else:
stream.write('{%s} %s [%s] ... %s\n' % (
worker, name, duration, test['status']))
if not print_failures:
print_attachments(stream, test, all_channels=True)
stream.flush()
def print_fails(stream):
"""Print summary failure report.
Currently unused, however there remains debate on inline vs. at end
reporting, so leave the utility function for later use.
"""
if not FAILS:
return
stream.write("\n==============================\n")
stream.write("Failed %s tests - output below:" % len(FAILS))
stream.write("\n==============================\n")
for f in FAILS:
stream.write("\n%s\n" % f['id'])
stream.write("%s\n" % ('-' * len(f['id'])))
print_attachments(stream, f, all_channels=True)
stream.write('\n')
def count_tests(key, value):
count = 0
for k, v in RESULTS.items():
for item in v:
if key in item:
if re.search(value, item[key]):
count += 1
return count
def run_time():
runtime = 0.0
for k, v in RESULTS.items():
for test in v:
runtime += float(get_duration(test['timestamps']).strip('s'))
return runtime
def worker_stats(worker):
tests = RESULTS[worker]
num_tests = len(tests)
delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
return num_tests, delta
def print_summary(stream):
stream.write("\n======\nTotals\n======\n")
stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
run_time()))
stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
# we could have no results, especially as we filter out the process-codes
if RESULTS:
stream.write("\n==============\nWorker Balance\n==============\n")
for w in range(max(RESULTS.keys()) + 1):
if w not in RESULTS:
stream.write(
" - WARNING: missing Worker %s! "
"Race in testr accounting.\n" % w)
else:
num, time = worker_stats(w)
stream.write(" - Worker %s (%s tests) => %ss\n" %
(w, num, time))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--no-failure-debug', '-n', action='store_true',
dest='print_failures', help='Disable printing failure '
'debug information in realtime')
parser.add_argument('--fails', '-f', action='store_true',
dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
return parser.parse_args()
def main():
args = parse_args()
stream = subunit.ByteStreamToStreamResult(
sys.stdin, non_subunit_name='stdout')
outcomes = testtools.StreamToDict(
functools.partial(show_outcome, sys.stdout,
print_failures=args.print_failures))
summary = testtools.StreamSummary()
result = testtools.CopyStreamResult([outcomes, summary])
result.startTestRun()
try:
stream.run(result)
finally:
result.stopTestRun()
if count_tests('status', '.*') == 0:
print("The test run didn't actually run any tests")
return 1
if args.post_fails:
print_fails(sys.stdout)
print_summary(sys.stdout)
return (0 if summary.wasSuccessful() else 1)
if __name__ == '__main__':
sys.exit(main())
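# Illustration (not part of the script) of what cleanup_test_name() above does:
# it drops the bracketed tags and, optionally, the testscenarios suffix.
#   cleanup_test_name("tempest.api.X.test_y[id-123,smoke](scn_1)")
#       -> "tempest.api.X.test_y(scn_1)"
#   cleanup_test_name("tempest.api.X.test_y[id-123,smoke](scn_1)", strip_scenarios=True)
#       -> "tempest.api.X.test_y"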
| 32.66129
| 79
| 0.596543
|
080ceaf39eda9a13dae50629f0ca816ca74a421f
| 1,745
|
py
|
Python
|
apps/drug_target_interaction/sman/metrics.py
|
RuikangSun/PaddleHelix
|
1c84ea6d51625d2d66b3eef1d9a7cc9a87c99e0e
|
[
"Apache-2.0"
] | 454
|
2020-11-21T01:02:45.000Z
|
2022-03-29T12:53:40.000Z
|
apps/drug_target_interaction/sman/metrics.py
|
chupvl/PaddleHelix
|
6e082f89b8090c3c360593d40a08bffc884165dd
|
[
"Apache-2.0"
] | 161
|
2020-12-12T06:35:54.000Z
|
2022-03-27T11:31:13.000Z
|
apps/drug_target_interaction/sman/metrics.py
|
chupvl/PaddleHelix
|
6e082f89b8090c3c360593d40a08bffc884165dd
|
[
"Apache-2.0"
] | 108
|
2020-12-07T09:01:10.000Z
|
2022-03-31T14:42:29.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements several metrics.
"""
import numpy as np
from math import sqrt
from scipy import stats
from sklearn.linear_model import LinearRegression
def rmse(y,f):
rmse = sqrt(((y - f)**2).mean(axis=0))
return rmse
def mae(y,f):
mae = (np.abs(y-f)).mean()
return mae
def sd(y,f):
f,y = f.reshape(-1,1),y.reshape(-1,1)
lr = LinearRegression()
lr.fit(f,y)
y_ = lr.predict(f)
sd = (((y - y_) ** 2).sum() / (len(y) - 1)) ** 0.5
return sd
def mse(y,f):
mse = ((y - f)**2).mean(axis=0)
return mse
def pearson(y,f):
rp = np.corrcoef(y, f)[0,1]
return rp
def spearman(y,f):
rs = stats.spearmanr(y, f)[0]
return rs
def ci(y,f):
ind = np.argsort(y)
y = y[ind]
f = f[ind]
i = len(y)-1
j = i-1
z = 0.0
S = 0.0
while i > 0:
while j >= 0:
if y[i] > y[j]:
z = z+1
u = f[i] - f[j]
if u > 0:
S = S + 1
elif u == 0:
S = S + 0.5
j = j - 1
i = i - 1
j = i-1
ci = S/z
return ci
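# Small worked example (made-up values) for the metrics defined above.
if __name__ == '__main__':
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    print(rmse(y_true, y_pred))     # ~0.158
    print(mae(y_true, y_pred))      # 0.15
    print(pearson(y_true, y_pred))  # close to 1.0
    print(ci(y_true, y_pred))       # 1.0: the predicted ordering matches the true one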
| 23.90411
| 74
| 0.558166
|
b328322788cbfca9d9bffe46d26a1da6db094786
| 32,933
|
py
|
Python
|
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
|
fossabot/turicreate
|
a500d5e52143ad15ebdf771d9f74198982c7c45c
|
[
"BSD-3-Clause"
] | 1
|
2019-04-16T19:51:18.000Z
|
2019-04-16T19:51:18.000Z
|
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
|
tashby/turicreate
|
7f07ce795833d0c56c72b3a1fb9339bed6d178d1
|
[
"BSD-3-Clause"
] | 3
|
2021-09-08T02:18:00.000Z
|
2022-03-12T00:39:44.000Z
|
src/unity/python/turicreate/toolkits/topic_model/topic_model.py
|
tashby/turicreate
|
7f07ce795833d0c56c72b3a1fb9339bed6d178d1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Methods for creating a topic model and predicting the topics of new documents.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _turicreate
from turicreate.toolkits._model import Model as _Model
from turicreate.data_structures.sframe import SFrame as _SFrame
from turicreate.data_structures.sarray import SArray as _SArray
from turicreate.toolkits.text_analytics._util import _check_input
from turicreate.toolkits.text_analytics._util import random_split as _random_split
from turicreate.toolkits._internal_utils import _check_categorical_option_type, \
_map_unity_proxy_to_object, \
_precomputed_field, \
_toolkit_repr_print
import sys as _sys
if _sys.version_info.major == 3:
_izip = zip
_xrange = range
else:
from itertools import izip as _izip
_xrange = xrange
import operator as _operator
import array as _array
def create(dataset,
num_topics=10,
initial_topics=None,
alpha=None,
beta=.1,
num_iterations=10,
num_burnin=5,
associations=None,
verbose=False,
print_interval=10,
validation_set=None,
method='auto'):
"""
    Create a topic model from the given data set. A topic model assumes each
    document is a mixture of a set of topics, where for each topic some words
    are more likely than others. This method learns such a topic model for the
    given document collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
    >>> train, test = tc.text_analytics.random_split(docs, .8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
    >>> print(results['perplexity'])
"""
dataset = _check_input(dataset)
_check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
if method == 'cgs' or method == 'auto':
model_name = 'cgs_topic_model'
else:
model_name = 'alias_topic_model'
# If associations are provided, check they are in the proper format
if associations is None:
associations = _turicreate.SFrame({'word': [], 'topic': []})
if isinstance(associations, _turicreate.SFrame) and \
associations.num_rows() > 0:
assert set(associations.column_names()) == set(['word', 'topic']), \
"Provided associations must be an SFrame containing a word column\
and a topic column."
assert associations['word'].dtype == str, \
"Words must be strings."
assert associations['topic'].dtype == int, \
"Topic ids must be of int type."
if alpha is None:
alpha = float(50) / num_topics
if validation_set is not None:
_check_input(validation_set) # Must be a single column
if isinstance(validation_set, _turicreate.SFrame):
column_name = validation_set.column_names()[0]
validation_set = validation_set[column_name]
(validation_train, validation_test) = _random_split(validation_set)
else:
validation_train = _SArray()
validation_test = _SArray()
opts = {'model_name': model_name,
'data': dataset,
'num_topics': num_topics,
'num_iterations': num_iterations,
'print_interval': print_interval,
'alpha': alpha,
'beta': beta,
'num_burnin': num_burnin,
'associations': associations}
# Initialize the model with basic parameters
response = _turicreate.toolkits._main.run("text_topicmodel_init", opts)
m = TopicModel(response['model'])
# If initial_topics provided, load it into the model
if isinstance(initial_topics, _turicreate.SFrame):
assert set(['vocabulary', 'topic_probabilities']) == \
set(initial_topics.column_names()), \
"The provided initial_topics does not have the proper format, \
e.g. wrong column names."
observed_topics = initial_topics['topic_probabilities'].apply(lambda x: len(x))
assert all(observed_topics == num_topics), \
"Provided num_topics value does not match the number of provided initial_topics."
# Rough estimate of total number of words
weight = len(dataset) * 1000
opts = {'model': m.__proxy__,
'topics': initial_topics['topic_probabilities'],
'vocabulary': initial_topics['vocabulary'],
'weight': weight}
response = _turicreate.toolkits._main.run("text_topicmodel_set_topics", opts)
m = TopicModel(response['model'])
# Train the model on the given data set and retrieve predictions
opts = {'model': m.__proxy__,
'data': dataset,
'verbose': verbose,
'validation_train': validation_train,
'validation_test': validation_test}
response = _turicreate.toolkits._main.run("text_topicmodel_train", opts)
m = TopicModel(response['model'])
return m
class TopicModel(_Model):
"""
TopicModel objects can be used to predict the underlying topic of a
document.
This model cannot be constructed directly. Instead, use
:func:`turicreate.topic_model.create` to create an instance
of this model. A detailed list of parameter options and code samples
are available in the documentation for the create function.
"""
def __init__(self, model_proxy):
self.__proxy__ = model_proxy
@classmethod
def _native_name(cls):
return ["cgs_topic_model", "alias_topic_model"]
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the model.
"""
return self.__repr__()
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where relevant)
the schema of the training data, description of the training data,
training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
section_titles=['Schema','Settings']
vocab_length = len(self.vocabulary)
verbose = self.verbose == 1
sections=[
[
('Vocabulary Size',_precomputed_field(vocab_length))
],
[
('Number of Topics', 'num_topics'),
('alpha','alpha'),
('beta','beta'),
('Iterations', 'num_iterations'),
('Training time', 'training_time'),
('Verbose', _precomputed_field(verbose))
]
]
return (sections, section_titles)
def __repr__(self):
"""
Print a string description of the model when the model name is entered
in the terminal.
"""
key_str = "{:<{}}: {}"
width = 30
(sections, section_titles) = self._get_summary_struct()
out = _toolkit_repr_print(self, sections, section_titles, width=width)
extra = []
extra.append(key_str.format("Accessible fields", width, ""))
extra.append(key_str.format("m.topics",width,"An SFrame containing the topics."))
extra.append(key_str.format("m.vocabulary",width,"An SArray containing the words in the vocabulary."))
extra.append(key_str.format("Useful methods", width, ""))
extra.append(key_str.format("m.get_topics()",width,"Get the most probable words per topic."))
extra.append(key_str.format("m.predict(new_docs)",width,"Make predictions for new documents."))
return out + '\n' + '\n'.join(extra)
def _get(self, field):
"""
Return the value of a given field. The list of all queryable fields is
detailed below, and can be obtained with the
:py:func:`~TopicModel._list_fields` method.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| topics | An SFrame containing a column with the unique|
| | words observed during training, and a column |
| | of arrays containing the probability values |
| | for each word given each of the topics. |
+-----------------------+----------------------------------------------+
| vocabulary | An SArray containing the words used. This is |
| | same as the vocabulary column in the topics |
| | field above. |
+-----------------------+----------------------------------------------+
Parameters
----------
field : string
Name of the field to be retrieved.
Returns
-------
out
Value of the requested field.
"""
opts = {'model': self.__proxy__, 'field': field}
response = _turicreate.toolkits._main.run("text_topicmodel_get_value", opts)
if field == 'vocabulary':
return _SArray(None, _proxy=response['value'])
elif field == 'topics':
return _SFrame(None, _proxy=response['value'])
return response['value']
def _training_stats(self):
"""
Return a dictionary of statistics collected during creation of the
model. These statistics are also available with the ``get`` method and
are described in more detail in that method's documentation.
Returns
-------
out : dict
Dictionary of statistics compiled during creation of the
TopicModel.
See Also
--------
summary
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> m._training_stats()
{'training_iterations': 20,
'training_time': 20.5034}
"""
fields = self._list_fields()
stat_fields = ['training_time',
'training_iterations']
if 'validation_perplexity' in fields:
stat_fields.append('validation_perplexity')
ret = {k : self._get(k) for k in stat_fields}
return ret
def get_topics(self, topic_ids=None, num_words=5, cdf_cutoff=1.0,
output_type='topic_probabilities'):
"""
Get the words associated with a given topic. The score column is the
probability of choosing that word given that you have chosen a
particular topic.
Parameters
----------
topic_ids : list of int, optional
The topics for which to retrieve words. Topic ids are zero-based.
Throws an error if a topic id is greater than or equal to
m['num_topics'], or if the requested topic name is not present.
num_words : int, optional
The number of words to show.
cdf_cutoff : float, optional
Allows one to only show the most probable words whose cumulative
probability is below this cutoff. For example if there exist
three words where
.. math::
p(word_1 | topic_k) = .1
p(word_2 | topic_k) = .2
p(word_3 | topic_k) = .05
then setting :math:`cdf_{cutoff}=.3` would return only
:math:`word_1` and :math:`word_2` since
:math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}`
output_type : {'topic_probabilities' | 'topic_words'}, optional
Determine the type of desired output. See below.
Returns
-------
out : SFrame
If output_type is 'topic_probabilities', then the returned value is
an SFrame with a column of words ranked by a column of scores for
each topic. Otherwise, the returned value is an SArray where
each element is a list of the most probable words for each topic.
Examples
--------
Get the highest ranked words for all topics.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs,
num_iterations=50)
>>> m.get_topics()
+-------+----------+-----------------+
| topic | word | score |
+-------+----------+-----------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 1 | function | 0.0482834508265 |
| 1 | input | 0.0456270024091 |
| 1 | point | 0.0302662839454 |
| 1 | result | 0.0239474934631 |
| 1 | problem | 0.0231750116011 |
| ... | ... | ... |
+-------+----------+-----------------+
Get the highest ranked words for topics 0 and 1 and show 15 words per
topic.
>>> m.get_topics([0, 1], num_words=15)
+-------+----------+------------------+
| topic | word | score |
+-------+----------+------------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 0 | response | 0.0139740298286 |
| 0 | layer | 0.0122585145062 |
| 0 | features | 0.0115343177265 |
| 0 | feature | 0.0103530459301 |
| 0 | spatial | 0.00823387994361 |
| ... | ... | ... |
+-------+----------+------------------+
If one wants to instead just get the top words per topic, one may
change the format of the output as follows.
>>> topics = m.get_topics(output_type='topic_words')
dtype: list
Rows: 10
[['cell', 'image', 'input', 'object', 'visual'],
['algorithm', 'data', 'learning', 'method', 'set'],
['function', 'input', 'point', 'problem', 'result'],
['model', 'output', 'pattern', 'set', 'unit'],
['action', 'learning', 'net', 'problem', 'system'],
['error', 'function', 'network', 'parameter', 'weight'],
['information', 'level', 'neural', 'threshold', 'weight'],
['control', 'field', 'model', 'network', 'neuron'],
['hidden', 'layer', 'system', 'training', 'vector'],
['component', 'distribution', 'local', 'model', 'optimal']]
"""
_check_categorical_option_type('output_type', output_type,
['topic_probabilities', 'topic_words'])
if topic_ids is None:
topic_ids = list(range(self._get('num_topics')))
assert isinstance(topic_ids, list), \
"The provided topic_ids is not a list."
if any([type(x) == str for x in topic_ids]):
raise ValueError("Only integer topic_ids can be used at this point in time.")
if not all([x >= 0 and x < self.num_topics for x in topic_ids]):
raise ValueError("Topic id values must be non-negative and less than the " + \
"number of topics used to fit the model.")
opts = {'model': self.__proxy__,
'topic_ids': topic_ids,
'num_words': num_words,
'cdf_cutoff': cdf_cutoff}
response = _turicreate.toolkits._main.run('text_topicmodel_get_topic',
opts)
ret = _map_unity_proxy_to_object(response['top_words'])
def sort_wordlist_by_prob(z):
words = sorted(z.items(), key=_operator.itemgetter(1), reverse=True)
return [word for (word, prob) in words]
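# For 'topic_words' output, aggregate the (word, score) pairs of each topic
# and keep only the words, ordered from most to least probable.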
if output_type != 'topic_probabilities':
ret = ret.groupby('topic',
{'word': _turicreate.aggregate.CONCAT('word', 'score')})
words = ret.sort('topic')['word'].apply(sort_wordlist_by_prob)
ret = _SFrame({'words': words})
return ret
def predict(self, dataset, output_type='assignment', num_burnin=None):
"""
Use the model to predict topics for each document. The provided
`dataset` should be an SArray object where each element is a dict
representing a single document in bag-of-words format, where keys
are words and values are their corresponding counts. If `dataset` is
an SFrame, then it must contain a single column of dict type.
The current implementation will make inferences about each document
given its estimates of the topics learned when creating the model.
This is done via Gibbs sampling.
Parameters
----------
dataset : SArray, SFrame of type dict
A set of documents to use for making predictions.
output_type : str, optional
The type of output desired. This can either be
- assignment: the returned values are integers in [0, num_topics)
- probability: each returned prediction is a vector with length
num_topics, where element k represents the probability that the
document belongs to topic k.
num_burnin : int, optional
The number of iterations of Gibbs sampling to perform when
inferring the topics for documents at prediction time.
If provided this will override the burnin value set during
training.
Returns
-------
out : SArray
See Also
--------
evaluate
Examples
--------
Make predictions about which topic each document belongs to.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> pred = m.predict(docs)
If one is interested in the probability of each topic
>>> pred = m.predict(docs, output_type='probability')
Notes
-----
For each unique word w in a document d, we sample an assignment to
topic k with probability proportional to
.. math::
p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k}
where
- :math:`W` is the size of the vocabulary,
- :math:`n_{d,k}` is the number of other times we have assigned a word in
document :math:`d` to topic :math:`k`,
- :math:`\Phi_{w,k}` is the probability under the model of choosing word
:math:`w` given the word is of topic :math:`k`. This is the matrix
returned by calling `m['topics']`.
This represents a collapsed Gibbs sampler for the document assignments
while we keep the topics learned during training fixed.
This process is done in parallel across all documents, five times per
document.
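As a rough sketch of a single sampling step for one document (toy NumPy
arrays for illustration only; this is not the toolkit's internal code):
>>> import numpy as np
>>> alpha = 0.1
>>> n_dk = np.array([3., 1.])     # topics already assigned in this document
>>> phi_w = np.array([0.2, 0.7])  # p(word w | topic k) for each topic k
>>> p = (n_dk + alpha) * phi_w    # unnormalized p(z_dw = k)
>>> new_topic = np.random.choice(len(p), p=p / p.sum())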
"""
dataset = _check_input(dataset)
if num_burnin is None:
num_burnin = self.num_burnin
opts = {'model': self.__proxy__,
'data': dataset,
'num_burnin': num_burnin}
response = _turicreate.toolkits._main.run("text_topicmodel_predict", opts)
preds = _SArray(None, _proxy=response['predictions'])
# Get most likely topic if probabilities are not requested
if output_type not in ['probability', 'probabilities', 'prob']:
# equivalent to numpy.argmax(x)
preds = preds.apply(lambda x: max(_izip(x, _xrange(len(x))))[1])
return preds
def evaluate(self, train_data, test_data=None, metric='perplexity'):
"""
Estimate the model's ability to predict new data. Imagine you have a
corpus of books. One common approach to evaluating topic models is to
train on the first half of all of the books and see how well the model
predicts the second half of each book.
This method returns a metric called perplexity, which is related to the
likelihood of observing these words under the given model. See
:py:func:`~turicreate.topic_model.perplexity` for more details.
The provided `train_data` and `test_data` must have the same length,
i.e., both data sets must have the same number of documents; the model
will use train_data to estimate which topics each document belongs to, and
this is used to estimate the model's performance at predicting the
unseen words in the test data.
See :py:func:`~turicreate.topic_model.TopicModel.predict` for details
on how these predictions are made, and see
:py:func:`~turicreate.text_analytics.random_split` for a helper function
that can be used for making train/test splits.
Parameters
----------
train_data : SArray or SFrame
A set of documents to predict topics for.
test_data : SArray or SFrame, optional
A set of documents to evaluate performance on.
By default this is set to be the same as train_data.
metric : str
The chosen metric to use for evaluating the topic model.
Currently only 'perplexity' is supported.
Returns
-------
out : dict
The set of estimated evaluation metrics.
See Also
--------
predict, turicreate.toolkits.text_analytics.random_split
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = turicreate.topic_model.create(train_data)
>>> m.evaluate(train_data, test_data)
{'perplexity': 2467.530370396021}
"""
train_data = _check_input(train_data)
if test_data is None:
test_data = train_data
else:
test_data = _check_input(test_data)
predictions = self.predict(train_data, output_type='probability')
topics = self.topics
ret = {}
ret['perplexity'] = perplexity(test_data,
predictions,
topics['topic_probabilities'],
topics['vocabulary'])
return ret
@classmethod
def _get_queryable_methods(cls):
'''Return a dictionary describing the methods that are queryable through
Predictive Service.'''
return {'predict':{'dataset':'sarray'}}
def perplexity(test_data, predictions, topics, vocabulary):
"""
Compute the perplexity of a set of test documents given a set
of predicted topics.
Let theta be the matrix of document-topic probabilities, where
theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic
probabilities, where phi_jk = p(word j | topic k).
Then for each word in each document, we compute for a given word w
and document d
.. math::
p(word | \theta[doc_id,:], \phi[word_id,:]) =
\sum_k \theta[doc_id, k] * \phi[word_id, k]
We compute loglikelihood to be:
.. math::
l(D) = \sum_{i \in D} \sum_{j \in D_i} count_{i,j} * \log Pr(word_{i,j} | \theta, \phi)
and perplexity to be
.. math::
\exp \{ - l(D) / \sum_i \sum_j count_{i,j} \}
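As a rough numerical sketch of this computation (toy NumPy arrays for
illustration only; this is not the toolkit's internal code):
>>> import numpy as np
>>> theta = np.array([[0.6, 0.4]])            # p(topic k | document i)
>>> phi = np.array([[0.7, 0.1], [0.3, 0.9]])  # p(word j | topic k)
>>> counts = np.array([[2, 1]])               # count_{i,j}
>>> loglik = (counts * np.log(theta.dot(phi.T))).sum()
>>> float(np.exp(-loglik / counts.sum()))     # perplexity, roughly 2.06 here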
Parameters
----------
test_data : SArray of type dict or SFrame with a single column of type dict
Documents in bag-of-words format.
predictions : SArray
An SArray of vector type, where each vector contains estimates of the
probability that this document belongs to each of the topics.
This must have the same size as test_data; otherwise an exception
occurs. This can be the output of
:py:func:`~turicreate.topic_model.TopicModel.predict`, for example.
topics : SFrame
An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'.
The value returned by m['topics'] is a valid input for this argument,
where m is a trained :py:class:`~turicreate.topic_model.TopicModel`.
vocabulary : SArray
An SArray of words to use. All words in test_data that are not in this
vocabulary will be ignored.
Notes
-----
For more details, see equations 13-16 of [PattersonTeh2013].
References
----------
.. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_
.. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian
Langevin Dynamics on the Probability Simplex"
<http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_
NIPS, 2013.
Examples
--------
>>> from turicreate import topic_model
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = topic_model.create(train_data)
>>> pred = m.predict(train_data)
>>> topics = m['topics']
>>> p = topic_model.perplexity(test_data, pred,
topics['topic_probabilities'],
topics['vocabulary'])
>>> p
1720.7 # lower values are better
"""
test_data = _check_input(test_data)
assert isinstance(predictions, _SArray), \
"Predictions must be an SArray of vector type."
assert predictions.dtype == _array.array, \
"Predictions must be probabilities. Try using m.predict() with " + \
"output_type='probability'."
opts = {'test_data': test_data,
'predictions': predictions,
'topics': topics,
'vocabulary': vocabulary}
response = _turicreate.toolkits._main.run("text_topicmodel_get_perplexity",
opts)
return response['perplexity']
| 39.48801
| 110
| 0.582698
|
72b9f9fecf8548643a9b6d1e938fb80d5005be97
| 1,304
|
py
|
Python
|
diayn/policies/diayn_policy.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
diayn/policies/diayn_policy.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 6
|
2021-02-02T23:00:02.000Z
|
2022-01-13T03:13:51.000Z
|
diayn/policies/diayn_policy.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | null | null | null |
import numpy as np
from diayn_cont.policy.skill_policy_obs_dim_select import SkillTanhGaussianPolicyObsSelect
from rlkit.policies.base import Policy
class SkillTanhGaussianPolicyObsSelectDIAYN(SkillTanhGaussianPolicyObsSelect):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._skill = 0
@property
def skill(self):
return self._skill
@skill.setter
def skill(self, skill_to_set):
assert isinstance(skill_to_set, int)
assert skill_to_set < self.skill_dim
self._skill = skill_to_set
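# Wrapper that exposes the same skill interface as the underlying stochastic
# policy but always queries it with deterministic=True (e.g. for evaluation).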
class MakeDeterministic(Policy):
def __init__(self,
stochastic_policy: SkillTanhGaussianPolicyObsSelectDIAYN):
self.stochastic_policy = stochastic_policy
def get_action(self,
obs_np: np.ndarray,
deterministic: bool = None):
assert deterministic is None
return self.stochastic_policy.get_action(
obs_np,
deterministic=True
)
@property
def skill_dim(self):
return self.stochastic_policy.skill_dim
@property
def skill(self):
return self.stochastic_policy.skill
@skill.setter
def skill(self, skill_to_set):
self.stochastic_policy.skill = skill_to_set
| 25.076923
| 90
| 0.67408
|
776e847d3eb72eec215f75615acef2f5b20f6386
| 3,758
|
py
|
Python
|
animate_flex.py
|
martin-rdz/trace_airmass_source
|
0b850d6dcfc33ebc6fe19a0c0898ab3e7d5693de
|
[
"MIT"
] | 3
|
2020-12-11T10:38:06.000Z
|
2021-03-07T11:06:16.000Z
|
animate_flex.py
|
martin-rdz/trace_airmass_source
|
0b850d6dcfc33ebc6fe19a0c0898ab3e7d5693de
|
[
"MIT"
] | 3
|
2020-12-03T15:50:14.000Z
|
2021-04-06T08:20:30.000Z
|
animate_flex.py
|
martin-rdz/trace_airmass_source
|
0b850d6dcfc33ebc6fe19a0c0898ab3e7d5693de
|
[
"MIT"
] | 2
|
2021-03-03T02:17:01.000Z
|
2022-02-07T16:52:21.000Z
|
#! /usr/bin/env python3
# coding=utf-8
""""""
"""
Author: radenz@tropos.de
"""
import datetime
import argparse
import sys, os
import gc
import subprocess
import traceback
import numpy as np
import toml
sys.path.append("..")
import trace_source
parser = argparse.ArgumentParser()
parser.add_argument('--station', help='station name like limassol, barbados or mcmurdo')
parser.add_argument('--datetime', help='date in the format YYYYMMDD-HH')
parser.add_argument('--levels', nargs='+', type=int)
parser.add_argument('--dynamics', default='false', help='add the isobars/isoterms from the grib files')
#parser.add_argument('--daterange', help='date range in the format YYYYMMDD-YYYMMDD')
args = parser.parse_args()
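# Example invocation (hypothetical values; the matching config_<station>.toml
# must exist):
#   python3 animate_flex.py --station limassol --datetime 20201211-09 --levels 2 5 --dynamics true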
config_file = 'config_{}.toml'.format(args.station)
with open(config_file) as f:
config = toml.loads(f.read())
end = datetime.datetime.strptime(args.datetime, '%Y%m%d-%H')
savepath = '{}/{}_maps'.format(config['plot_dir'], end.strftime('%Y%m%d_%H'))
print("savepath ", savepath)
folder = config['partposit_dir'] + '{}/'.format(end.strftime('%Y%m%d_%H'))
print('partposit_dir', folder)
dt_range = [end-datetime.timedelta(days=10), end]
files = os.listdir(folder)
files = sorted([f for f in files if 'partposit' in f])
ls = trace_source.land_sfc.land_sfc()
if args.levels is not None:
levels = args.levels
else:
# get levels from config file
raise ValueError
print('levels ', args.levels)
if args.dynamics == 'false':
add_dyn = False
elif args.dynamics == 'true':
add_dyn = True
else:
raise ValueError
level_to_heights = {}
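# For every partposit file and release level: plot the particle positions on a
# map and record the mean release height (used below to name the animations).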
for f in files[:]:
for i in levels:
dt = datetime.datetime.strptime(f[10:], '%Y%m%d%H%M%S')
part_pos = trace_source.flexpart.read_partpositions(folder + f, 1, ctable=False)
traj = trace_source.flexpart.read_flexpart_traj_meta(folder + "trajectories.txt")
level_to_heights[i] = np.mean(traj['releases_meta'][i]['heights'])
trace_source.flexpart.plot_part_loc_map(part_pos, i, dt, traj, savepath, ls=ls,
config=config,
add_dyn=add_dyn,
)
#add_fire='M6_7452')
gc.collect()
os.chdir(savepath)
print(os.getcwd())
for i in levels:
fname_animation = "{}_{:.0f}_r{:0>2}_{}.gif".format(end.strftime('%Y%m%d_%H'), level_to_heights[i], i, args.station)
command = "convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r{:0>2}*.png | sort -r` {}".format(i, fname_animation)
print('run: ', command)
try:
process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
except:
traceback.print_exc()
fname_animation = "{}_{:.0f}_r{:0>2}_{}_f.gif".format(end.strftime('%Y%m%d_%H'), level_to_heights[i], i, args.station)
command = "convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r{:0>2}*.png | sort ` {}".format(i, fname_animation)
print('run: ', command)
try:
process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
except:
traceback.print_exc()
# from flexpart module
# convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif
# from notebook
# convert -delay 20 -loop 0 `ls r2*.png | sort -r` r2.gif
# convert -resize 1500x1000 -delay 20 -loop 0 `ls r4*.png | sort -r` r4.gif
# convert -scale 50% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif
# convert -scale 70% -coalesce -layers Optimize -delay 20 -loop 0 `ls r11*.png | sort -r` r11.gif
| 33.553571
| 137
| 0.644758
|
e1b6524a7d7500c72a6ffb0bf4b324174c35d36c
| 660
|
py
|
Python
|
examples/plotting/file/linked_brushing.py
|
andreagrant/bokehDev
|
a684afee183496c54d4f187a890707cf6b5ec2a5
|
[
"BSD-3-Clause"
] | 1
|
2021-04-03T13:05:55.000Z
|
2021-04-03T13:05:55.000Z
|
examples/plotting/file/linked_brushing.py
|
andreagrant/bokehDev
|
a684afee183496c54d4f187a890707cf6b5ec2a5
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plotting/file/linked_brushing.py
|
andreagrant/bokehDev
|
a684afee183496c54d4f187a890707cf6b5ec2a5
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, gridplot, show, output_file
N = 300
x = np.linspace(0, 4*np.pi, N)
y1 = np.sin(x)
y2 = np.cos(x)
source = ColumnDataSource(data=dict(x=x, y1=y1, y2=y2))
TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select"
s1 = figure(tools=TOOLS, title="Figure 1", min_border=5)
s1.circle('x', 'y1', source=source)
s2 = figure(tools=TOOLS)
# linked brushing is expressed by sharing data sources between renderers
s2.circle('x', 'y2', source=source)
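# e.g. a box_select or lasso_select in one figure highlights the same points in
# the other, because both renderers draw from the shared ColumnDataSource above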
p = gridplot([[s1,s2]])
output_file("linked_brushing.html", title="linked_brushing.py example")
show(p)
| 22.758621
| 72
| 0.730303
|
17a3d8f982232d537ecc9171ac1945c9c6316042
| 1,686
|
py
|
Python
|
minmax.py
|
lkesteloot/verilog_minmax
|
f6d774cd8a823d2382c86a349b4385545483fc99
|
[
"Apache-2.0"
] | 1
|
2018-05-13T20:54:18.000Z
|
2018-05-13T20:54:18.000Z
|
minmax.py
|
lkesteloot/verilog_minmax
|
f6d774cd8a823d2382c86a349b4385545483fc99
|
[
"Apache-2.0"
] | null | null | null |
minmax.py
|
lkesteloot/verilog_minmax
|
f6d774cd8a823d2382c86a349b4385545483fc99
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2017 Lawrence Kesteloot
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generates Verilog code to compute the min or max of a set of registers or
# constants. Specify the operator and all the operands on the command line.
#
# Examples:
#
# python minmax.py "<" vertex_0_x vertex_1_x vertex_2_x
# python minmax.py ">" vertex_0_x vertex_1_x vertex_2_x
#
import sys
INDENT = " "
# Generate the sub-tree with the given indent, operator, and operands.
def generate(indent, operator, operands):
if len(operands) == 0:
raise Exception("Should never have zero operands")
if len(operands) == 1:
return operands[0]
return "%s %s %s\n%s? %s\n%s: %s" % (operands[0], operator, operands[1],
indent,
generate(indent + INDENT, operator, [operands[0]] + operands[2:]),
indent,
generate(indent + INDENT, operator, [operands[1]] + operands[2:]))
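# For example, generate(INDENT, "<", ["a", "b", "c"]) produces a nested
# conditional equivalent to (whitespace simplified):
#   a < b ? (a < c ? a : c) : (b < c ? b : c)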
def main(args):
if len(args) < 2:
print "Usage: minmax.py operator a b c . . ."
sys.exit(1)
print "result <= " + generate(INDENT, args[0], args[1:]) + ";"
main(sys.argv[1:])
| 32.423077
| 78
| 0.658363
|
f361983656edd9ac8d26c35bf68e545514e019ec
| 1,434
|
py
|
Python
|
noxfile.py
|
kjelljorner/libconeangle
|
52ef8f38d41a8d6feea0869f457a85598cf6e59b
|
[
"MIT"
] | 8
|
2022-03-21T07:02:14.000Z
|
2022-03-31T22:58:32.000Z
|
noxfile.py
|
kjelljorner/libconeangle
|
52ef8f38d41a8d6feea0869f457a85598cf6e59b
|
[
"MIT"
] | 1
|
2022-03-26T10:24:32.000Z
|
2022-03-26T14:12:02.000Z
|
noxfile.py
|
kjelljorner/libconeangle
|
52ef8f38d41a8d6feea0869f457a85598cf6e59b
|
[
"MIT"
] | null | null | null |
"""Automated testing linting and formatting apparatus."""
import nox
from nox.sessions import Session
package = "libconeangle"
nox.options.sessions = "lint", "tests", "mypy" # default session
locations = "libconeangle", "test", "noxfile.py" # Linting locations
pyversions = ["3.8", "3.9", "3.10"]
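# A few example invocations (assuming nox is installed):
#   nox                     # run the default sessions: lint, tests, mypy
#   nox -s black            # run only the formatting session
#   nox -s tests -- -k foo  # pass a (hypothetical) filter through to pytest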
# Testing
@nox.session(python=pyversions)
def tests(session: Session) -> None:
"""Run tests."""
args = session.posargs + ["--cov=libconeangle", "--import-mode=importlib", "-s"]
session.install("pytest", "pytest-cov")
session.install(".")
session.run("pytest", *args)
# Linting
@nox.session(python="3.9")
def lint(session: Session) -> None:
"""Lint code."""
args = session.posargs or locations
session.install(
"flake8",
"flake8-black",
"flake8-bugbear",
"flake8-import-order",
"flake8-annotations",
"flake8-docstrings",
"darglint",
)
session.run("flake8", *args)
# Code formatting
@nox.session(python="3.9")
def black(session: Session) -> None:
"""Format code."""
args = session.posargs or locations
session.install("black")
session.run("black", *args)
# Static typing
@nox.session(python="3.9")
def mypy(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or locations
session.install("mypy")
session.install("types-pkg_resources")
session.run("mypy", *args)
| 25.607143
| 84
| 0.638075
|
201bf1453b1c34e9762b94dece9cbb1570bcd514
| 4,205
|
py
|
Python
|
userbot/modules/emojigames.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2022-03-03T01:31:48.000Z
|
2022-03-26T00:15:41.000Z
|
userbot/modules/emojigames.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-16T02:54:27.000Z
|
2022-03-17T09:17:12.000Z
|
userbot/modules/emojigames.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-16T02:41:38.000Z
|
2022-03-16T02:41:38.000Z
|
# fix by @heyworld for OUB
# bug fixed by @d3athwarrior
# Recode by @divarvian
# t.me/MikooUserbot
from telethon.tl.types import InputMediaDice
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import man_cmd
@man_cmd(pattern="dice(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice(""))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice(""))
except BaseException:
pass
@man_cmd(pattern="dart(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice("🎯"))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice("🎯"))
except BaseException:
pass
@man_cmd(pattern="basket(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice("🏀"))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice("🏀"))
except BaseException:
pass
@man_cmd(pattern="bowling(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice("🎳"))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice("🎳"))
except BaseException:
pass
@man_cmd(pattern="ball(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice("⚽"))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice("⚽"))
except BaseException:
pass
@man_cmd(pattern="jackpot(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
await event.delete()
r = await event.reply(file=InputMediaDice("🎰"))
if input_str:
try:
required_number = int(input_str)
while r.media.value != required_number:
await r.delete()
r = await event.reply(file=InputMediaDice("🎰"))
except BaseException:
pass
CMD_HELP.update(
{
"emojigames": f"**Plugin : **`emojigames`\
\n\n • **Syntax :** `{cmd}dice` 1-6\
\n • **Function : **Memainkan emoji game dice dengan score yg di tentukan kita.\
\n\n • **Syntax :** `{cmd}dart` 1-6\
\n • **Function : **Memainkan emoji game dart dengan score yg di tentukan kita.\
\n\n • **Syntax :** `{cmd}basket` 1-5\
\n • **Function : **Memainkan emoji game basket dengan score yg di tentukan kita.\
\n\n • **Syntax :** `{cmd}bowling` 1-6\
\n • **Function : **Memainkan emoji game bowling dengan score yg di tentukan kita.\
\n\n • **Syntax :** `{cmd}ball` 1-5\
\n • **Function : **Memainkan emoji game ball telegram score yg di tentukan kita.\
\n\n • **Syntax :** `{cmd}jackpot` 1\
\n • **Function : **Memainkan emoji game jackpot dengan score yg di tentukan kita.\
\n\n • **NOTE: **Jangan gunakan nilai lebih atau bot akan Crash**\
"
}
)
| 31.380597
| 93
| 0.575981
|
ca03532eed4d8af744b0ba9f8bb14c25db6d08d4
| 2,308
|
py
|
Python
|
stem_testing/examples/client_usage/Custom_Path_Selection.py
|
joshuatee/Posh-Stem
|
bc7f1d4e010e8a7f1e1e4dd70518ec6b1d210fa0
|
[
"Unlicense"
] | 1
|
2019-04-02T00:15:59.000Z
|
2019-04-02T00:15:59.000Z
|
stem_testing/examples/client_usage/Custom_Path_Selection.py
|
joshuatee/Posh-Stem
|
bc7f1d4e010e8a7f1e1e4dd70518ec6b1d210fa0
|
[
"Unlicense"
] | null | null | null |
stem_testing/examples/client_usage/Custom_Path_Selection.py
|
joshuatee/Posh-Stem
|
bc7f1d4e010e8a7f1e1e4dd70518ec6b1d210fa0
|
[
"Unlicense"
] | null | null | null |
import StringIO
import time
import pycurl
import stem.control
# Static exit for us to make 2-hop circuits through. Picking aurora, a
# particularly beefy one...
#
# https://atlas.torproject.org/#details/379FB450010D17078B3766C2273303C358C3A442
EXIT_FINGERPRINT = '379FB450010D17078B3766C2273303C358C3A442'
SOCKS_PORT = 9050
CONNECTION_TIMEOUT = 30 # timeout before we give up on a circuit
def query(url):
"""
Uses pycurl to fetch a site using the proxy on the SOCKS_PORT.
"""
output = StringIO.StringIO()
query = pycurl.Curl()
query.setopt(pycurl.URL, url)
query.setopt(pycurl.PROXY, 'localhost')
query.setopt(pycurl.PROXYPORT, SOCKS_PORT)
query.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5_HOSTNAME)
query.setopt(pycurl.CONNECTTIMEOUT, CONNECTION_TIMEOUT)
query.setopt(pycurl.WRITEFUNCTION, output.write)
try:
query.perform()
return output.getvalue()
except pycurl.error as exc:
raise ValueError("Unable to reach %s (%s)" % (url, exc))
def scan(controller, path):
"""
Fetch check.torproject.org through the given path of relays, providing back
the time it took.
"""
circuit_id = controller.new_circuit(path, await_build = True)
def attach_stream(stream):
if stream.status == 'NEW':
controller.attach_stream(stream.id, circuit_id)
controller.add_event_listener(attach_stream, stem.control.EventType.STREAM)
try:
controller.set_conf('__LeaveStreamsUnattached', '1') # leave stream management to us
start_time = time.time()
check_page = query('https://check.torproject.org/')
if 'Congratulations. This browser is configured to use Tor.' not in check_page:
raise ValueError("Request didn't have the right content")
return time.time() - start_time
finally:
controller.remove_event_listener(attach_stream)
controller.reset_conf('__LeaveStreamsUnattached')
with stem.control.Controller.from_port() as controller:
controller.authenticate()
relay_fingerprints = [desc.fingerprint for desc in controller.get_network_statuses()]
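# Measure a two-hop circuit (candidate relay -> static exit) through every
# relay in the current consensus and report how long the fetch took.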
for fingerprint in relay_fingerprints:
try:
time_taken = scan(controller, [fingerprint, EXIT_FINGERPRINT])
print('%s => %0.2f seconds' % (fingerprint, time_taken))
except Exception as exc:
print('%s => %s' % (fingerprint, exc))
| 29.21519
| 89
| 0.737002
|