Column schema (one record per source file):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class (Python) |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 (nullable) | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
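The records below follow this schema: metadata, then the file's content, then per-file statistics. As a minimal, hypothetical sketch of how such records could be inspected, assuming the table is available as a JSON Lines export (the file name `code_rows.jsonl` and the pandas approach are assumptions, not part of the dataset):

```python
import pandas as pd

# Hypothetical JSON Lines export of the table above; the path is an assumption.
df = pd.read_json("code_rows.jsonl", lines=True)

# Example filter using the schema's statistics columns: keep Python files whose
# longest line is reasonably short and whose alphanumeric fraction looks like
# ordinary source code rather than minified or binary-like content.
subset = df[(df["lang"] == "Python")
            & (df["max_line_length"] < 1000)
            & (df["alphanum_fraction"].between(0.25, 0.90))]

for _, row in subset.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```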

hexsha: f381167e8d010fda04299377952b97149e058591 | size: 8,501 | ext: py | lang: Python
path: ros/src/tl_detector/tl_detector.py | repo: helloxms/autonomous_ros @ 860fb5b759eb2c7981da17c12ac907cceee870b3 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
from scipy.spatial import KDTree
import math
import tf
import cv2
import yaml
STATE_COUNT_THRESHOLD = 3 # how many times light state must be stable before publishing.
LIGHT_DISTANCE_THRESHOLD = 30 # at how many meters to light waypoint we start checking light state.
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.on_simulator = rospy.get_param('~on_simulator')
self.use_ground_truth = False
self.pose = None
self.waypoints = None
self.camera_image = None
self.camera_image_count = 1
self.lights = []
self.waypoint_tree = None
self.waypoints_2d = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier(self.on_simulator)
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
#rospy.logwarn(self.waypoint_tree)
def traffic_cb(self, msg):
self.lights = msg.lights
#rospy.loginfo("tl_detector::traffic_cb: {0}".format(self.lights))
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
# only process every n-th image to improve performance.
if self.camera_image_count < 10:
self.camera_image_count += 1
self.has_image = False
self.camera_image = None
else:
self.camera_image_count = 1
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
            x (float), y (float): position (map coordinates) to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = self.waypoint_tree.query([x,y],1)[1]
return closest_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# For testing, just return the light state
# return light.state
# if(not self.has_image):
# self.prev_light_loc = None
# return False
# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
# #Get classification
# return self.light_classifier.get_classification(cv_image)
if self.use_ground_truth:
rospy.loginfo("debugging, using ground truth")
return light.state
if (not self.has_image):
# rospy.loginfo("no image info!")
self.prev_light_loc = None
return TrafficLight.RED
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
##Get classification
# return self.light_classifier.get_classification(cv_image)
pred_state = self.light_classifier.get_classification(cv_image)
rospy.loginfo("SSD network says: %s (wright answer is %s)", pred_state, light.state)
return pred_state
def euclidian_distance(self, position1, position2):
x, y, z = position1.x - position2.x, position1.y - position2.y, position1.z - position2.z
return math.sqrt(x*x + y*y + z*z)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_wp_idx
if d>=0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
            if closest_light:
                # check the distance from car to closest_light (only valid once a light waypoint was found)
                closest_light_distance = self.euclidian_distance(self.pose.pose.position, self.waypoints.waypoints[line_wp_idx].pose.pose.position)
                if closest_light_distance < LIGHT_DISTANCE_THRESHOLD:
                    state = self.get_light_state(closest_light) # approaching a light, try to determine its state
                    rospy.loginfo("approaching %s traffic light %f ahead", state, closest_light_distance)
                    if state == TrafficLight.RED:
                        return line_wp_idx, state
        # no pose yet, no upcoming light, or the light is still far away: state is don't care.
        return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
avg_line_length: 38.292793 | max_line_length: 147 | alphanum_fraction: 0.642748

hexsha: 2bf73bafd3c92ffdaeb1dcf60d565aa015704801 | size: 589 | ext: py | lang: Python
path: extendedAPIs/utils/find_transaction_objects.py | repo: ttw225/IOTA_learning @ 90b804765b9250c349dab7db8a06144cdcbdb52d | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from iota.commands.extended.utils import find_transaction_objects
address=[]
address.append('LEYNSIMADMXAUYRGXKKEXPHDMZLRISZBSRZXUMCIKP9JQDOXSCIUGKYFFNPPVPGCHEJAWWSDHCKGOORPC')
transactions = find_transaction_objects(addresses=address)
for transaction in transactions:
# Ignore input transactions; these have cryptographic signatures,
# not human-readable messages.
if transaction.value < 0:
continue
print(f'Message from {transaction.hash}:')
message = transaction.signature_message_fragment
if message is None:
print('(None)')
else:
print(message.decode())
avg_line_length: 29.45 | max_line_length: 99 | alphanum_fraction: 0.794567

hexsha: 5bcebd2bdbe7132a490db108446eab0af744b74a | size: 1,232 | ext: py | lang: Python
path: src/test.py | repo: ZhenningLang/wheezy-captcha @ 7b84c88dffb896f75ea3912b1c3e5b8085ab400f | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import random
from captchacha.image import captcha
from captchacha.image import text, background, offset, rotate
from captchacha.image import curve, noise, smooth
if __name__ == '__main__':
import string
import os
color_choices = ('#674331', '#515329', '#725a38', '#68483e', '#7b2616', '#53595f')
def random_color():
return random.choice(color_choices)
current_path = os.path.split(os.path.realpath(__file__))[0]
captcha_image = captcha(drawings=[
background('#a5a4aa'), # #a5a4aa #aeada8
text(fonts=[os.path.join(current_path, '../fonts/CourierNew-Bold.ttf'),
os.path.join(current_path, '../fonts/Arial-Bold.ttf'),
os.path.join(current_path, '../fonts/CourierNew.ttf'),
os.path.join(current_path, '../fonts/Arial.ttf')],
color=random_color,
drawings=[
# warp(),
rotate(angle=45),
offset()
], squeeze_factor=0.6),
curve(),
noise(),
smooth()
], width=203, height=66)
image = captcha_image(random.sample(string.ascii_uppercase + string.digits, 6))
image.save('sample.jpg', 'JPEG', quality=75)
avg_line_length: 34.222222 | max_line_length: 86 | alphanum_fraction: 0.591721

hexsha: cda1e5d17adb74c0aaa37cb79e28eb27b8886557 | size: 4,663 | ext: py | lang: Python
path: pandoc/filters/main.py | repo: jasonchoimtt/dotfiles @ 3064785ddc4f5fd13118e15167ee38409eac5bc9 | licenses: ["MIT"]
max_stars_count: 13 (2016-09-24T02:20:59.000Z to 2017-04-27T09:15:02.000Z) | max_issues_count: null | max_forks_count: 1 (2019-01-28T06:17:15.000Z to 2019-01-28T06:17:15.000Z)
#!/usr/bin/env python3
import os
import os.path
import panflute as pf
from codeblocks import codeblocks
from file_codeblocks import file_codeblocks
from listings import listings
DEFAULT_PACKAGES = ['unicode-math', (os.path.dirname(__file__) + '/mylistings')]
def default_packages(elem: pf.Element, doc: pf.Doc):
"""
Auto-includes some packages when output is latex.
"""
if type(elem) == pf.MetaMap and elem == doc.get_metadata(builtin=False) and \
doc.format == 'latex':
# Import packages automatically in latex
dct = dict(elem.content)
if 'header-includes' not in dct:
dct['header-includes'] = pf.MetaList()
header = '\n'.join('\\usepackage{' + p + '}' for p in DEFAULT_PACKAGES)
dct['header-includes'].append(pf.MetaInlines(
pf.RawInline(header, format='latex')))
return pf.MetaMap(**dct)
def display_math_align(elem: pf.Element, doc: pf.Doc):
"""
Syntax: $$& (align* environment content) $$
Latex align* environment. Also supported by HTML Math renderers like
MathJax.
"""
if type(elem) == pf.Math and elem.format == 'DisplayMath':
if elem.text[0] == '&':
text = '\\begin{align*}' + elem.text[1:] + '\\end{align*}'
if doc.format == 'latex':
return pf.RawInline(text, format='latex')
else:
elem.text = text
return elem
def include_files(elem: pf.Element, doc: pf.Doc):
"""
    Syntax: ![#include](path/to/file) [codeblock attributes]
Includes the file as a codeblock, which can be processed by other filters.
"""
if type(elem) == pf.Para and len(elem.content) == 1:
child = elem.content[0]
        if type(child) == pf.Image and len(child.content) == 1 and \
                type(child.content[0]) == pf.Str and child.content[0].text == '#include':
cwd = os.getcwd()
path = os.path.abspath(child.url)
if path.startswith(cwd):
with open(path) as f:
code = f.read()
else:
code = '[Permission Denied]'
return pf.CodeBlock(code, child.identifier, child.classes, child.attributes)
section_first = True
def break_before_section(elem: pf.Element, doc: pf.Doc):
if type(elem) == pf.Header and elem.level == 1 and \
doc.format == 'latex' and doc.get_metadata('break-before-section', False):
# Except the first one, I guess
global section_first
if section_first:
section_first = False
return
return [pf.RawBlock('\\pagebreak', format='latex'), elem]
def rewrite_collapse(elem: pf.Element, doc: pf.Doc):
if type(elem) == pf.Div and 'collapse' in elem.classes:
label = elem.attributes.get('data-label')
prefix = [pf.Emph(pf.Str(label)), pf.Str(':'), pf.Space] if label else []
if doc.format == 'html':
heading = pf.Div(pf.Para(*prefix, pf.Str('[+]')), classes=['label'])
main = pf.Div(*elem.content, classes=['main'])
while elem.content:
elem.content.pop()
elem.content.extend([heading, main])
return elem
else:
return [pf.Para(*prefix), *elem.content]
def convert_latex(elem: pf.Element, doc: pf.Doc):
if type(elem) == pf.RawBlock and elem.format in ('latex', 'tex') and doc.format == 'html':
if elem.text == '\\qed':
return pf.Para(pf.Str('\u25a1'))
if type(elem) == pf.RawInline and elem.format in ('latex', 'tex') and doc.format == 'html':
if elem.text == '\\qed':
return pf.Span(pf.Str('\u25a1'), attributes={'style': 'display: block; text-align: right'})
def add_pandown_javascript(elem: pf.Element, doc: pf.Doc):
if (type(elem) == pf.Doc and doc.get_metadata('pandown-preview', False) and
doc.format == 'html'):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pandown.js')) as f:
pandown_javascript = f.read()
raw = '<script>\n{}\n</script>'.format(pandown_javascript)
doc.content.append(pf.RawBlock(raw, format='html'))
EXTRACT_FILE_CODEBLOCKS_FILTERS = [
display_math_align,
include_files,
file_codeblocks
]
DEFAULT_FILTERS = [
default_packages,
display_math_align,
include_files,
codeblocks,
file_codeblocks,
listings,
break_before_section,
rewrite_collapse,
convert_latex,
add_pandown_javascript,
]
if __name__ == '__main__':
pf.run_filters(DEFAULT_FILTERS)
avg_line_length: 34.036496 | max_line_length: 103 | alphanum_fraction: 0.600043

hexsha: 549626bf4a5adfeec063b1b2a82c339de3c49581 | size: 9,326 | ext: py | lang: Python
path: talon_draft_window/draft_talon_helpers.py | repo: CameronSBell/knausj_talon @ 3e57e0165257cf07b0e21880d44a91e79cb3ef16 | licenses: ["MIT"]
max_stars_count: 298 (2020-02-23T03:00:51.000Z to 2022-03-30T02:11:00.000Z) | max_issues_count: 521 (2020-02-21T18:21:17.000Z to 2022-03-31T16:40:34.000Z) | max_forks_count: 499 (2020-03-07T05:43:52.000Z to 2022-03-28T12:24:54.000Z)
from typing import Optional
from talon import ui, settings, Module, Context, actions
from .draft_ui import DraftManager
mod = Module()
# ctx is for toggling the draft_window_showing variable
# which lets you execute actions whenever the window is visible.
ctx = Context()
# ctx_focused is active only when the draft window is focussed. This
# lets you execute actions under that condition.
ctx_focused = Context()
ctx_focused.matches = r"""
title: Talon Draft
"""
mod.tag("draft_window_showing", desc="Tag set when draft window showing")
setting_theme = mod.setting(
"draft_window_theme",
type=str,
default="dark",
desc="Sets the main colors of the window, one of 'dark' or 'light'",
)
setting_label_size = mod.setting(
"draft_window_label_size",
type=int,
default=20,
desc="Sets the size of the word labels used in the draft window",
)
setting_label_color = mod.setting(
"draft_window_label_color",
type=str,
default=None,
desc=(
"Sets the color of the word labels used in the draft window. "
"E.g. 00ff00 would be green"
),
)
setting_text_size = mod.setting(
"draft_window_text_size",
type=int,
default=20,
desc="Sets the size of the text used in the draft window",
)
draft_manager = DraftManager()
# Update the styling of the draft window dynamically as user settings change
def _update_draft_style(*args):
draft_manager.set_styling(
**{
arg: setting.get()
for setting, arg in (
(setting_theme, "theme"),
(setting_label_size, "label_size"),
(setting_label_color, "label_color"),
(setting_text_size, "text_size"),
)
}
)
settings.register("", _update_draft_style)
@ctx_focused.action_class("user")
class ContextSensitiveDictationActions:
"""
Override these actions to assist 'Smart dictation mode'.
see https://github.com/knausj85/knausj_talon/pull/356
"""
def dictation_peek_left(clobber=False):
area = draft_manager.area
return area[max(0, area.sel.left - 50) : area.sel.left]
def dictation_peek_right():
area = draft_manager.area
return area[area.sel.right : area.sel.right + 50]
def paste(text: str):
# todo: remove once user.paste works reliably with the draft window
actions.insert(text)
@ctx_focused.action_class("edit")
class EditActions:
"""
Make default edit actions more efficient.
"""
def selected_text() -> str:
area = draft_manager.area
if area.sel:
result = area[area.sel.left : area.sel.right]
return result
return ""
from talon import cron
class UndoWorkaround:
"""
Workaround for the experimental textarea's undo being character by character.
This keeps a debounced undo history. Can be deleted once this todo item is
fixed: https://github.com/talonvoice/talon/issues/254#issuecomment-789149734
"""
# Set this to False if you want to turn it off, or just delete all references
# to this class
enable_workaround = True
# Stack of (text_value, selection) tuples representing the undo stack
undo_stack = []
# Stack of (text_value, selection) tuples representing the redo stack
redo_stack = []
# Used by the timer to check when the text has stopped changing
pending_undo = None
# timer handle
timer_handle = None
@classmethod
def start_logger(cls, reset_undo_stack: bool):
if reset_undo_stack:
cls.undo_stack = []
cls.redo_stack = []
cls.stop_logger()
cls.timer_handle = cron.interval("500ms", cls._log_changes)
@classmethod
def stop_logger(cls):
if cls.timer_handle is not None:
cron.cancel(cls.timer_handle)
cls.timer_handle = None
cls.pending_undo = None
@classmethod
def perform_undo(cls):
if len(cls.undo_stack) == 0:
return
curr_text = draft_manager.area.value
curr_sel = (draft_manager.area.sel.left, draft_manager.area.sel.right)
text, sel = cls.undo_stack[-1]
if text == curr_text:
cls.undo_stack.pop()
if len(cls.undo_stack) == 0:
return
# Most of the time (unless user has only just finished updating) the
# top of the stack will have the same contents as the text area. In
# this case pop again to get a bit lower. We should never have the
# same text twice, hence we don't need a loop.
text, sel = cls.undo_stack[-1]
# Remember the current state in the redo stack
cls.redo_stack.append((curr_text, curr_sel))
draft_manager.area.value = text
draft_manager.area.sel = sel
cls.pending_undo = (text, sel)
@classmethod
def perform_redo(cls):
if len(cls.redo_stack) == 0:
return
text, sel = cls.redo_stack.pop()
draft_manager.area.value = text
draft_manager.area.sel = sel
cls.pending_undo = (text, sel)
cls.undo_stack.append((text, sel))
@classmethod
def _log_changes(cls):
"""
If the text and cursor position hasn't changed for two interval iterations
(1s) and the undo stack doesn't match the current state, then add to the stack.
"""
curr_val = draft_manager.area.value
# Turn the Span into a tuple, because we can't == Spans
curr_sel = (draft_manager.area.sel.left, draft_manager.area.sel.right)
curr_state = (curr_val, curr_sel)
state_stack_mismatch = (
len(cls.undo_stack) == 0
or
# Only want to update the undo stack if the value has changed, not just
# the selection
curr_state[0] != cls.undo_stack[-1][0]
)
if cls.pending_undo == curr_state and state_stack_mismatch:
cls.undo_stack.append(curr_state)
# Clear out the redo stack because we've changed the text
cls.redo_stack = []
elif cls.pending_undo != curr_state:
cls.pending_undo = curr_state
elif not state_stack_mismatch and len(cls.undo_stack) > 0:
# Remember the cursor position in the undo stack for the current text value
cls.undo_stack[-1] = (cls.undo_stack[-1][0], curr_sel)
else:
# The text area text is not changing, do nothing
pass
if UndoWorkaround.enable_workaround:
ctx_focused.action("edit.undo")(UndoWorkaround.perform_undo)
ctx_focused.action("edit.redo")(UndoWorkaround.perform_redo)
@mod.action_class
class Actions:
def draft_show(text: Optional[str] = None):
"""
Shows draft window
"""
draft_manager.show(text)
UndoWorkaround.start_logger(text is not None)
ctx.tags = ["user.draft_window_showing"]
def draft_hide():
"""
Hides draft window
"""
draft_manager.hide()
UndoWorkaround.stop_logger()
ctx.tags = []
def draft_select(
start_anchor: str, end_anchor: str = "", include_trailing_whitespace: int = 0
):
"""
Selects text in the draft window
"""
draft_manager.select_text(
start_anchor,
end_anchor=None if end_anchor == "" else end_anchor,
include_trailing_whitespace=include_trailing_whitespace == 1,
)
def draft_position_caret(anchor: str, after: int = 0):
"""
Positions the caret in the draft window
"""
draft_manager.position_caret(anchor, after=after == 1)
def draft_get_text() -> str:
"""
Returns the text in the draft window
"""
return draft_manager.get_text()
def draft_resize(width: int, height: int):
"""
Resize the draft window.
"""
draft_manager.reposition(width=width, height=height)
def draft_named_move(name: str, screen_number: Optional[int] = None):
"""
Lets you move the window to the top, bottom, left, right, or middle
of the screen.
"""
screen = ui.screens()[screen_number or 0]
window_rect = draft_manager.get_rect()
xpos = (screen.width - window_rect.width) / 2
ypos = (screen.height - window_rect.height) / 2
if name == "top":
ypos = 50
elif name == "bottom":
ypos = screen.height - window_rect.height - 50
elif name == "left":
xpos = 50
elif name == "right":
xpos = screen.width - window_rect.width - 50
elif name == "middle":
# That's the default values
pass
# Adjust for the fact that the screen may not be at 0,0.
xpos += screen.x
ypos += screen.y
draft_manager.reposition(xpos=xpos, ypos=ypos)
# Some capture groups we need
@mod.capture(rule="{self.letter}+")
def draft_anchor(m) -> str:
"""
An anchor (string of letters)
"""
return "".join(m)
@mod.capture(rule="(top|bottom|left|right|middle)")
def draft_window_position(m) -> str:
"""
One of the named positions you can move the window to
"""
return "".join(m)
avg_line_length: 29.05296 | max_line_length: 87 | alphanum_fraction: 0.622239

hexsha: a8d4d22e66f5a5fbde090c34d5077b7f8ddbae8d | size: 3,365 | ext: py | lang: Python
path: projects/ocr/ocr.py | repo: julien-amar/date-a-scientist @ 8748516ab5bcfca488e6ef6ecb4fcd3786daa8fc (issues repo: julien-amar/code-academy-ml) | licenses: ["Apache-2.0"]
max_stars_count: 4 (2019-02-11T22:18:51.000Z to 2021-02-21T10:46:24.000Z) | max_issues_count: 1 (2018-11-14T15:00:01.000Z) | max_forks_count: 6 (2019-06-22T12:28:38.000Z to 2021-07-23T08:53:20.000Z)
import codecademylib3_seaborn
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
# Get Optical Recognition of Handwritten Digits Data Set
digits = datasets.load_digits()
# Get data set description
print (digits.DESCR)
# Get data set pixels
print (digits.data)
# Get data set labels
print (digits.target)
# Define 10 clusters (as we have 10 digits (0 to 9))
model = KMeans(n_clusters=10, random_state=42)
# Cluster the data
model.fit(digits.data)
# Figure size (width, height)
fig = plt.figure(figsize=(8, 3))
fig.suptitle('Cluster Center Images', fontsize=14, fontweight='bold')
for i in range(10):
# Initialize subplots in a grid of 2X5, at i+1th position
ax = fig.add_subplot(2, 5, 1 + i)
# Display images
ax.imshow(model.cluster_centers_[i].reshape((8, 8)), cmap=plt.cm.binary)
plt.show()
# Adjust the subplots
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# For each of the 64 images
for i in range(64):
# Initialize the subplots: add a subplot in the grid of 8 by 8, at the i+1-th position
ax = fig.add_subplot(8, 8, i+1, xticks=[], yticks=[])
# Display an image at the i-th position
ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
# Label the image with the target value
ax.text(0, 7, str(digits.target[i]))
plt.show()
# Try text recognition by drawing digits
new_samples = np.array([
[0.08,2.27,4.19,6.17,5.03,0.15,0.00,0.00,4.17,7.62,7.39,6.47,7.62,7.00,1.83,0.00,6.69,5.55,0.30,0.08,2.57,7.30,4.26,0.00,6.85,4.49,0.00,0.00,0.00,7.23,4.18,0.00,5.49,6.86,0.30,0.00,0.91,7.62,2.82,0.00,2.21,7.61,2.58,0.00,3.57,7.62,1.67,0.00,0.31,7.09,7.62,7.07,7.62,5.93,0.07,0.00,0.00,1.05,3.50,3.81,2.96,0.15,0.00,0.00],
[0.00,0.23,3.87,6.09,4.95,0.46,0.00,0.00,0.00,3.58,7.62,6.00,7.62,3.88,0.00,0.00,0.00,0.68,1.82,0.00,5.93,5.32,0.00,0.00,0.00,0.00,0.00,1.52,7.15,5.25,0.00,0.00,0.00,0.00,0.38,6.78,6.93,1.82,0.00,0.00,0.00,0.00,4.77,7.60,3.57,0.45,0.00,0.00,0.00,0.00,7.46,7.62,7.61,7.62,1.83,0.00,0.00,0.00,1.13,1.52,2.27,3.04,0.46,0.00],
[0.00,0.83,6.46,6.85,6.86,7.39,3.36,0.00,0.00,1.45,7.62,5.02,3.81,3.80,1.06,0.00,0.00,0.46,7.61,5.33,3.81,3.81,2.59,0.23,0.00,0.00,7.16,7.62,6.86,7.17,7.62,6.55,0.00,0.00,0.00,0.00,0.00,0.23,4.63,7.61,0.00,0.00,1.14,1.67,4.02,6.92,7.62,7.61,0.00,0.00,7.47,7.62,7.62,5.62,2.72,0.37,0.00,0.00,1.14,1.52,0.90,0.00,0.00,0.00],
[0.00,0.91,4.57,4.57,4.48,1.52,0.00,0.00,0.00,1.37,6.09,6.09,7.30,5.09,0.00,0.00,0.00,0.00,0.00,0.60,6.84,5.25,0.00,0.00,0.00,0.00,0.00,7.15,7.62,6.77,2.97,0.00,0.00,0.00,0.00,2.58,3.42,6.46,6.85,0.00,0.00,0.07,2.89,4.26,4.57,6.69,6.77,0.00,0.00,0.54,7.39,7.31,6.24,5.86,2.04,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00,0.00]
])
new_labels = model.predict(new_samples)
print (new_labels)
for i in range(len(new_labels)):
if new_labels[i] == 0:
print(0, end='')
elif new_labels[i] == 1:
print(9, end='')
elif new_labels[i] == 2:
print(2, end='')
elif new_labels[i] == 3:
print(1, end='')
elif new_labels[i] == 4:
print(6, end='')
elif new_labels[i] == 5:
print(8, end='')
elif new_labels[i] == 6:
print(4, end='')
elif new_labels[i] == 7:
print(5, end='')
elif new_labels[i] == 8:
print(7, end='')
elif new_labels[i] == 9:
print(3, end='')
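The if/elif ladder above hard-codes the cluster-index to digit mapping that was read off the cluster-centre images. A hypothetical, equivalent sketch using a lookup table (the mapping values are copied from the chain above; `cluster_to_digit` is a made-up name, not part of the original script):

```python
# Same cluster-index -> digit mapping as the if/elif chain, expressed as a dict lookup.
cluster_to_digit = {0: 0, 1: 9, 2: 2, 3: 1, 4: 6, 5: 8, 6: 4, 7: 5, 8: 7, 9: 3}
print(''.join(str(cluster_to_digit[label]) for label in new_labels))
```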
avg_line_length: 39.127907 | max_line_length: 322 | alphanum_fraction: 0.643388

hexsha: 939ab50848aeb5efc6f5ff0cd91d3b2cff0b7235 | size: 180 | ext: py | lang: Python
path: b.py | repo: kelvinndmo/me @ 06b9959b4284abc20b5ee365223381c75ec574d9 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
def inv(c):
    # Atbash-style mirror: 'a' <-> 'z', 'b' <-> 'y', ..., same for uppercase;
    # other characters pass through unchanged.
    if 'a' <= c <= 'z':
        return chr(122 - ord(c) + 97)
    if 'A' <= c <= 'Z':
        return chr(90 - ord(c) + 65)
    return c
print(''.join(inv(c) for c in 'az'))  # -> "za"
avg_line_length: 18 | max_line_length: 37 | alphanum_fraction: 0.405556

hexsha: b062991e4478398f17b8c641f7baba2ae9706233 | size: 1,135 | ext: py | lang: Python
path: powerline_shell/themes/load_theme.py | repo: aradzu10/powerline-shell @ 299493eb3ad65b0331d9369a279833e61237d9a6 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""
We load themes that way in order to improve performance.
"""
from powerline_shell import utils
def aradz():
from powerline_shell.themes import aradz
return aradz.Color
def basic():
from powerline_shell.themes import basic
return basic.Color
def default():
from powerline_shell.themes import default
return default.Color
def gruvbox():
from powerline_shell.themes import gruvbox
return gruvbox.Color
def solarized_dark():
from powerline_shell.themes import solarized_dark
return solarized_dark.Color
def solarized_light():
from powerline_shell.themes import solarized_light
return solarized_light.Color
def washed():
from powerline_shell.themes import washed
return washed.Color
THEMES_NAMES = {
"aradz": aradz,
"basic": basic,
"default": default,
"gruvbox": gruvbox,
"solarized_dark": solarized_dark,
"solarized_light": solarized_light,
"washed": washed,
}
def load_theme(theme_name):
if theme_name not in THEMES_NAMES:
utils.warn("There is no theme with name %s" % theme_name)
return THEMES_NAMES[theme_name]()
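A minimal usage sketch (hypothetical, not part of the module): `load_theme` imports only the requested theme's module and returns its `Color` class.

```python
# Hypothetical usage: this triggers "from powerline_shell.themes import gruvbox"
# inside load_theme; the other theme modules stay unimported.
Color = load_theme("gruvbox")
print(Color)
```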
avg_line_length: 19.568966 | max_line_length: 65 | alphanum_fraction: 0.722467

hexsha: 272e64a18107e0482e7efbd9518c18b9fceb0f5b | size: 19,134 | ext: py | lang: Python
path: eval_vit_on_medical.py | repo: ericpts/OD-test-master @ 82bdf234e69660d3c2e59c06062445196b865a79 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import glob
import random
import shutil
import time
import numpy as np
import sys
import os
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
from flax.training import checkpoints as flax_checkpoints
from vit_jax import checkpoint
from vit_jax import models
from vit_jax import train
from vit_jax.configs import augreg as augreg_config
from vit_jax.configs import models as models_config
from vit_jax import input_pipeline
from vit_jax.configs.common import with_dataset
from medical_ood import lib_medical_ood as med
from absl import logging
import pandas as pd
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from matplotlib import pyplot as plt
import os.path as osp
from sklearn.metrics import roc_auc_score
import argparse
import mlflow
from mlflow.tracking import MlflowClient
os.environ["MLFLOW_TRACKING_USERNAME"] = "exp-01.mlflow-yang.tifreaa"
os.environ["MLFLOW_TRACKING_PASSWORD"] = "parola"
remote_server_uri = "https://exp-01.mlflow-yang.inf.ethz.ch"
mlflow.set_tracking_uri(remote_server_uri)
N_train = 50000
N_test = 10000
# If tf-gpu is used, the tfds datasets will be loaded onto the GPU which will
# fill the GPU memory and won't allow for the ViT model to be loaded.
assert tf.config.list_physical_devices("GPU") == []
def main():
curr_run = mlflow.active_run()
ckpt_path = curr_run.data.params["finetuned_ckpt_path"].split("/", 2)[-1]
path = osp.join(os.environ["MEDICAL_OOD_DATA_PATH"], "vit_finetuned", ckpt_path)
ds_name = curr_run.data.params["dataset"]
model_config = models_config.AUGREG_CONFIGS[
curr_run.data.params["model.name"].split("-")[-1]
]
resolution = int(curr_run.data.params["pp.crop"])
num_classes = input_pipeline.get_dataset_info(ds_name, split="test")["num_classes"]
ood_ds_name = {
"cifar10": "cifar100",
"cifar100": "cifar10",
"drd": "riga",
"nih_id": "nih_ood",
"pc_id": "pc_uc3",
}[ds_name]
# Load a checkpoint from cloud
# takes a while for a big model
if path.endswith(".npz"):
params = checkpoint.load(path)
else:
params = flax_checkpoints.restore_checkpoint(path, models.VisionTransformer)[
"0"
]["target"]
# Get a clean model and a modified model that outputs pre-logits = embeddings for OOD detection
model = models.VisionTransformer(num_classes=num_classes, **model_config)
model_prelogits = models.VisionTransformer_prelogits(
num_classes=num_classes, **model_config
)
print("Start loading datasets...")
ood_test = prepare_pure_dataset(
med.load_dataset(ood_ds_name, split="test"),
num_classes,
shuffle=False,
resolution=resolution,
)
id_test = prepare_pure_dataset(
med.load_dataset(ds_name, split="test"),
num_classes,
shuffle=False,
resolution=resolution,
)
id_train = prepare_pure_dataset(
med.load_dataset(ds_name, split="train"),
num_classes,
shuffle=False,
resolution=resolution,
)
sanity_check_datasets(id_train, id_test, ood_test)
print("Start running forward passes...")
id_test_prelogits, id_test_logits, id_test_labels = standalone_get_prelogits(
model, model_prelogits, params, id_test, image_count=N_test
)
ood_test_prelogits, ood_test_logits, ood_test_labels = standalone_get_prelogits(
model, model_prelogits, params, ood_test, image_count=N_test
)
id_train_prelogits, id_train_logits, id_train_labels = standalone_get_prelogits(
model, model_prelogits, params, id_train, image_count=N_train
)
# Check prediction accuracy on ID data.
finetune_test_acc = np.mean(np.argmax(id_test_logits, axis=-1) == id_test_labels)
print(f"{ds_name} test accuracy = " + str(finetune_test_acc))
finetune_train_acc = np.mean(np.argmax(id_train_logits, axis=-1) == id_train_labels)
print(f"{ds_name} train accuracy = " + str(finetune_train_acc))
(
mahal_auroc,
maha_intermediate_dict,
indist_dists,
outdist_dists,
) = standard_mahal_auroc(
id_y_train=id_train_labels,
id_train_embeds=id_train_prelogits,
id_test_embeds=id_test_prelogits,
ood_test_embeds=ood_test_prelogits,
num_classes=num_classes,
)
new_mahal_auroc = relative_mahal_auroc(
id_y_train=id_train_labels,
id_train_embeds=id_train_prelogits,
id_test_embeds=id_test_prelogits,
ood_test_embeds=ood_test_prelogits,
num_classes=num_classes,
maha_intermediate_dict=maha_intermediate_dict,
indist_dists=indist_dists,
outdist_dists=outdist_dists,
)
msp_auroc = max_softmax_auroc(
id_test_logits=id_test_logits, ood_test_logits=ood_test_logits
)
if "ood_dataset" not in curr_run.data.params:
mlflow.log_param("ood_dataset", ood_ds_name)
else:
assert ood_ds_name == curr_run.data.params["ood_dataset"]
mlflow.log_metrics(
{
"finetune_test_acc": finetune_test_acc,
"finetune_train_acc": finetune_train_acc,
"mahal_auroc": mahal_auroc,
"new_mahal_auroc": new_mahal_auroc,
"max_softmax_auroc": msp_auroc,
}
)
print(
f"[ID={ds_name}; OOD={ood_ds_name}] Mahal AUROC={mahal_auroc}; New Mahal AUROC={new_mahal_auroc}; MSP AUROC={msp_auroc}"
)
def prepare_pure_dataset(
ds_in, num_classes=2, repeats=1, shuffle=True, resolution=224, batch_size=128
):
def pp(img, sz):
img = tf.cast(img, float)
img = tf.image.resize(img, [sz, sz])
return img
ds_in = ds_in.map(
lambda img, y: {"image": pp(img, resolution), "label": y},
tf.data.experimental.AUTOTUNE,
)
ds_in = ds_in.repeat(repeats)
if shuffle:
ds_in = ds_in.shuffle(200000)
ds_in = ds_in.batch(batch_size, drop_remainder=True)
return ds_in
def sanity_check_datasets(id_train, id_test, ood_test):
def get_value_spreads_for_dataset(ds_in):
batch = next(ds_in.as_numpy_iterator())
images = batch["image"]
min_now, mean_now, max_now = np.min(images), np.mean(images), np.max(images)
return min_now, mean_now, max_now
min_now, mean_now, max_now = get_value_spreads_for_dataset(id_train)
print(f"[ID train] Pixel statistics (min, mean, max):", min_now, mean_now, max_now)
min_now, mean_now, max_now = get_value_spreads_for_dataset(id_test)
print(f"[ID test] Pixel statistics (min, mean, max):", min_now, mean_now, max_now)
min_now, mean_now, max_now = get_value_spreads_for_dataset(ood_test)
print(f"[OOD test] Pixel statistics (min, mean, max):", min_now, mean_now, max_now)
def standalone_get_prelogits(
model, model_prelogits, params, ds_in, image_count=50000, batch_size=128
):
"""Returns prelogits on the dataset"""
prelogits_all = []
logits_all = []
labels_all = []
ts = []
t1 = time.time()
for batch in ds_in.as_numpy_iterator():
prelogits = model_prelogits.apply(
{"params": params}, batch["image"], train=False
)
logits = model.apply({"params": params}, batch["image"], train=False)
prelogits_all.append(prelogits)
logits_all.append(logits)
labels_all.append(batch["label"])
count_so_far = len(np.concatenate(prelogits_all, axis=0))
t2 = time.time()
ts.append(t2 - t1)
t1 = time.time()
t_rem = (image_count - count_so_far) * np.mean(ts) / batch_size
print(
"Images done="
+ str(count_so_far)
+ " time remaining="
+ str(int(t_rem))
+ "s"
)
if count_so_far >= image_count:
break # early break for subsets of data
return (
np.concatenate(prelogits_all, axis=0),
np.concatenate(logits_all, axis=0),
np.concatenate(labels_all, axis=0),
)
def get_scores(
indist_train_embeds_in,
indist_train_labels_in,
indist_test_embeds_in,
outdist_test_embeds_in,
subtract_mean=True,
normalize_to_unity=True,
subtract_train_distance=True,
indist_classes=2,
norm_name="L2",
):
# storing the replication results
maha_intermediate_dict = dict()
description = ""
all_train_mean = np.mean(indist_train_embeds_in, axis=0, keepdims=True)
indist_train_embeds_in_touse = indist_train_embeds_in
indist_test_embeds_in_touse = indist_test_embeds_in
outdist_test_embeds_in_touse = outdist_test_embeds_in
if subtract_mean:
indist_train_embeds_in_touse -= all_train_mean
indist_test_embeds_in_touse -= all_train_mean
outdist_test_embeds_in_touse -= all_train_mean
description = description + " subtract mean,"
if normalize_to_unity:
indist_train_embeds_in_touse = indist_train_embeds_in_touse / np.linalg.norm(
indist_train_embeds_in_touse, axis=1, keepdims=True
)
indist_test_embeds_in_touse = indist_test_embeds_in_touse / np.linalg.norm(
indist_test_embeds_in_touse, axis=1, keepdims=True
)
outdist_test_embeds_in_touse = outdist_test_embeds_in_touse / np.linalg.norm(
outdist_test_embeds_in_touse, axis=1, keepdims=True
)
description = description + " unit norm,"
# full train single fit
mean = np.mean(indist_train_embeds_in_touse, axis=0)
cov = np.cov((indist_train_embeds_in_touse - (mean.reshape([1, -1]))).T)
eps = 1e-8
cov_inv = np.linalg.inv(cov)
# getting per class means and covariances
class_means = []
class_cov_invs = []
class_covs = []
for c in range(indist_classes):
mean_now = np.mean(
indist_train_embeds_in_touse[indist_train_labels_in == c], axis=0
)
cov_now = np.cov(
(
indist_train_embeds_in_touse[indist_train_labels_in == c]
- (mean_now.reshape([1, -1]))
).T
)
class_covs.append(cov_now)
# print(c)
eps = 1e-8
cov_inv_now = np.linalg.inv(cov_now)
class_cov_invs.append(cov_inv_now)
class_means.append(mean_now)
# the average covariance for class specific
class_cov_invs = [
np.linalg.inv(np.mean(np.stack(class_covs, axis=0), axis=0))
] * len(class_covs)
maha_intermediate_dict["class_cov_invs"] = class_cov_invs
maha_intermediate_dict["class_means"] = class_means
maha_intermediate_dict["cov_inv"] = cov_inv
maha_intermediate_dict["mean"] = mean
out_totrain = maha_distance(outdist_test_embeds_in_touse, cov_inv, mean, norm_name)
in_totrain = maha_distance(indist_test_embeds_in_touse, cov_inv, mean, norm_name)
out_totrainclasses = [
maha_distance(
outdist_test_embeds_in_touse, class_cov_invs[c], class_means[c], norm_name
)
for c in range(indist_classes)
]
in_totrainclasses = [
maha_distance(
indist_test_embeds_in_touse, class_cov_invs[c], class_means[c], norm_name
)
for c in range(indist_classes)
]
out_scores = np.min(np.stack(out_totrainclasses, axis=0), axis=0)
in_scores = np.min(np.stack(in_totrainclasses, axis=0), axis=0)
if subtract_train_distance:
out_scores = out_scores - out_totrain
in_scores = in_scores - in_totrain
onehots = np.array([1] * len(out_scores) + [0] * len(in_scores))
scores = np.concatenate([out_scores, in_scores], axis=0)
return onehots, scores, description, maha_intermediate_dict
def get_auroc(onehots, scores, make_plot=True, add_to_title=None, swap_classes=False):
auroc = roc_auc_score(onehots, scores)
to_replot_dict = dict()
if swap_classes == False:
out_scores, in_scores = scores[onehots == 0], scores[onehots == 1]
else:
out_scores, in_scores = scores[onehots == 1], scores[onehots == 0]
if make_plot:
plt.figure(figsize=(5.5, 3), dpi=100)
if add_to_title is not None:
plt.title(
add_to_title + " AUROC=" + str(float(auroc * 100))[:6] + "%",
fontsize=14,
)
else:
plt.title(" AUROC=" + str(float(auroc * 100))[:6] + "%", fontsize=14)
vals, bins = np.histogram(out_scores, bins=51)
bin_centers = (bins[1:] + bins[:-1]) / 2.0
if make_plot:
plt.plot(
bin_centers, vals, linewidth=4, color="navy", marker="", label="in test"
)
plt.fill_between(bin_centers, vals, [0] * len(vals), color="navy", alpha=0.3)
to_replot_dict["out_bin_centers"] = bin_centers
to_replot_dict["out_vals"] = vals
vals, bins = np.histogram(in_scores, bins=51)
bin_centers = (bins[1:] + bins[:-1]) / 2.0
if make_plot:
plt.plot(
bin_centers, vals, linewidth=4, color="crimson", marker="", label="out test"
)
plt.fill_between(bin_centers, vals, [0] * len(vals), color="crimson", alpha=0.3)
to_replot_dict["in_bin_centers"] = bin_centers
to_replot_dict["in_vals"] = vals
if make_plot:
plt.xlabel("Score", fontsize=14)
plt.ylabel("Count", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.ylim([0, None])
plt.legend(fontsize=14)
plt.tight_layout()
plt.show()
return auroc, to_replot_dict
def standard_mahal_auroc(
id_y_train, id_train_embeds, id_test_embeds, ood_test_embeds, num_classes
):
onehots, scores, description, maha_intermediate_dict = get_scores(
np.array(id_train_embeds)[:, :],
id_y_train,
np.array(id_test_embeds)[:, :],
np.array(ood_test_embeds)[:, :],
indist_classes=num_classes,
subtract_mean=False,
normalize_to_unity=False,
subtract_train_distance=False,
)
class_means = maha_intermediate_dict["class_means"]
class_cov_invs = maha_intermediate_dict["class_cov_invs"]
indist_test_embeds = id_test_embeds
outdist_test_embeds = ood_test_embeds
indist_dists = []
for c in range(num_classes):
indist_offset_now = indist_test_embeds - class_means[c].reshape([1, -1])
maha_dists_now = np.sum(
np.matmul(indist_offset_now, class_cov_invs[c]) * indist_offset_now, axis=1
)
indist_dists.append(maha_dists_now)
outdist_dists = []
for c in range(num_classes):
outdist_offset_now = outdist_test_embeds - class_means[c].reshape([1, -1])
maha_dists_now = np.sum(
np.matmul(outdist_offset_now, class_cov_invs[c]) * outdist_offset_now,
axis=1,
)
outdist_dists.append(maha_dists_now)
indist_dists_byclass = np.stack(indist_dists, axis=1)
indist_min = np.min(indist_dists_byclass, axis=1)
outdist_dists_byclass = np.stack(outdist_dists, axis=1)
outdist_min = np.min(outdist_dists_byclass, axis=1)
onehots = np.array([1] * len(outdist_min) + [0] * len(indist_min))
scores = np.concatenate([outdist_min, indist_min], axis=0)
auroc, to_replot_dict = get_auroc(
onehots,
scores,
make_plot=False,
)
return auroc, maha_intermediate_dict, indist_dists, outdist_dists
def relative_mahal_auroc(
id_y_train,
id_train_embeds,
id_test_embeds,
ood_test_embeds,
num_classes,
maha_intermediate_dict,
indist_dists,
outdist_dists,
):
train_mean = maha_intermediate_dict["mean"]
train_cov_inv = maha_intermediate_dict["cov_inv"]
onehots, scores, description, _ = get_scores(
np.array(id_train_embeds)[:, :],
id_y_train,
np.array(id_test_embeds)[:, :],
np.array(ood_test_embeds)[:, :],
indist_classes=num_classes,
subtract_mean=False,
normalize_to_unity=False,
subtract_train_distance=True,
)
indist_dists_byclass = np.stack(indist_dists, axis=1)
indist_min = np.min(indist_dists_byclass, axis=1)
outdist_dists_byclass = np.stack(outdist_dists, axis=1)
outdist_min = np.min(outdist_dists_byclass, axis=1)
onehots = np.array([1] * len(outdist_min) + [0] * len(indist_min))
scores = np.concatenate([outdist_min, indist_min], axis=0)
indist_dists_byclass = np.stack(indist_dists, axis=1)
indist_min = np.min(indist_dists_byclass, axis=1)
outdist_dists_byclass = np.stack(outdist_dists, axis=1)
outdist_min = np.min(outdist_dists_byclass, axis=1)
indist_test_embeds = id_test_embeds
outdist_test_embeds = ood_test_embeds
prelogits = indist_test_embeds
offset_now = prelogits - np.array(train_mean).reshape([1, -1]).astype(np.float64)
offset_now = offset_now.astype(np.float64)
train_maha_dist = np.einsum(
"ai,ij->aj", offset_now, np.array(train_cov_inv).astype(np.float64)
)
train_maha_dist = np.einsum("aj,aj->a", train_maha_dist, offset_now)
indist_train_dist = train_maha_dist
prelogits = outdist_test_embeds
offset_now = prelogits - np.array(train_mean).reshape([1, -1]).astype(np.float64)
offset_now = offset_now.astype(np.float64)
train_maha_dist = np.einsum(
"ai,ij->aj", offset_now, np.array(train_cov_inv).astype(np.float64)
)
train_maha_dist = np.einsum("aj,aj->a", train_maha_dist, offset_now)
outdist_train_dist = train_maha_dist
outdist_scores = outdist_min - outdist_train_dist
indist_scores = indist_min - indist_train_dist
onehots = np.array([1] * len(outdist_min) + [0] * len(indist_min))
scores = np.concatenate([outdist_scores, indist_scores], axis=0)
auroc, to_replot_dict = get_auroc(
onehots,
scores,
make_plot=False,
)
return auroc
def max_softmax_auroc(id_test_logits, ood_test_logits):
scores = np.array(
np.concatenate(
[
np.max(np_softmax(id_test_logits), axis=-1),
np.max(np_softmax(ood_test_logits), axis=-1),
],
axis=0,
)
)
onehots = np.array([1] * len(id_test_logits) + [0] * len(ood_test_logits))
auroc, to_replot_dict = get_auroc(
onehots,
scores,
make_plot=False,
swap_classes=True,
)
return auroc
def np_softmax(zs):
exps = np.exp(zs - np.max(zs))
return exps / np.sum(exps, axis=-1, keepdims=True)
def maha_distance(xs, cov_inv_in, mean_in, norm_type=None):
diffs = xs - mean_in.reshape([1, -1])
second_powers = np.matmul(diffs, cov_inv_in) * diffs
if norm_type in [None, "L2"]:
return np.sum(second_powers, axis=1)
elif norm_type in ["L1"]:
return np.sum(np.sqrt(np.abs(second_powers)), axis=1)
elif norm_type in ["Linfty"]:
return np.max(second_powers, axis=1)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--run_id",
type=str,
required=True,
)
args = parser.parse_args()
with mlflow.start_run(run_id=args.run_id):
main()
avg_line_length: 31.731343 | max_line_length: 128 | alphanum_fraction: 0.667242

hexsha: f45903d86ee42f2cc3d9fb085987c5856513e6b3 | size: 31,571 | ext: py | lang: Python
path: litedram/init.py | repo: thirtythreeforty/litedram @ db879ae3f7d591482e4665801c946241bb663bce | licenses: ["OLDAP-2.6", "OLDAP-2.3", "OLDAP-2.7"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#
# This file is part of LiteDRAM.
#
# Copyright (c) 2013-2014 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2013-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2017 whitequark <whitequark@whitequark.org>
# Copyright (c) 2014 Yann Sionneau <ys@m-labs.hk>
# Copyright (c) 2018 bunnie <bunnie@kosagi.com>
# Copyright (c) 2019 Gabriel L. Somlo <gsomlo@gmail.com>
# Copyright (c) 2021 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
import math
from contextlib import contextmanager
from migen import *
cmds = {
"PRECHARGE_ALL": "DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS",
"MODE_REGISTER": "DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS",
"AUTO_REFRESH": "DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_CS",
"UNRESET": "DFII_CONTROL_ODT|DFII_CONTROL_RESET_N",
"CKE": "DFII_CONTROL_CKE|DFII_CONTROL_ODT|DFII_CONTROL_RESET_N"
}
# SDR ----------------------------------------------------------------------------------------------
def get_sdr_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = phy_settings.nphases
mr = log2_int(bl) + (cl << 4)
reset_dll = 1 << 8
init_sequence = [
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# DDR ----------------------------------------------------------------------------------------------
def get_ddr_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 4
mr = log2_int(bl) + (cl << 4)
emr = 0
reset_dll = 1 << 8
init_sequence = [
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# LPDDR --------------------------------------------------------------------------------------------
def get_lpddr_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 4
mr = log2_int(bl) + (cl << 4)
emr = 0
reset_dll = 1 << 8
init_sequence = [
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Extended Mode Register", emr, 2, cmds["MODE_REGISTER"], 0),
("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# DDR2 ---------------------------------------------------------------------------------------------
def get_ddr2_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 4
wr = 2
mr = log2_int(bl) + (cl << 4) + (wr << 9)
emr = 0
emr2 = 0
emr3 = 0
ocd = 7 << 7
reset_dll = 1 << 8
init_sequence = [
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Extended Mode Register 3", emr3, 3, cmds["MODE_REGISTER"], 0),
("Load Extended Mode Register 2", emr2, 2, cmds["MODE_REGISTER"], 0),
("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200),
("Load Extended Mode Register / OCD Default", emr+ocd, 1, cmds["MODE_REGISTER"], 0),
("Load Extended Mode Register / OCD Exit", emr, 1, cmds["MODE_REGISTER"], 0),
]
return init_sequence, None
# DDR3 ---------------------------------------------------------------------------------------------
def get_ddr3_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 8
cwl = phy_settings.cwl
def format_mr0(bl, cl, wr, dll_reset):
bl_to_mr0 = {
4: 0b10,
8: 0b00
}
cl_to_mr0 = {
5: 0b0010,
6: 0b0100,
7: 0b0110,
8: 0b1000,
9: 0b1010,
10: 0b1100,
11: 0b1110,
12: 0b0001,
13: 0b0011,
14: 0b0101
}
wr_to_mr0 = {
16: 0b000,
5: 0b001,
6: 0b010,
7: 0b011,
8: 0b100,
10: 0b101,
12: 0b110,
14: 0b111
}
mr0 = bl_to_mr0[bl]
mr0 |= (cl_to_mr0[cl] & 1) << 2
mr0 |= ((cl_to_mr0[cl] >> 1) & 0b111) << 4
mr0 |= dll_reset << 8
mr0 |= wr_to_mr0[wr] << 9
return mr0
def format_mr1(ron, rtt_nom, tdqs):
mr1 = ((ron >> 0) & 1) << 1
mr1 |= ((ron >> 1) & 1) << 5
mr1 |= ((rtt_nom >> 0) & 1) << 2
mr1 |= ((rtt_nom >> 1) & 1) << 6
mr1 |= ((rtt_nom >> 2) & 1) << 9
mr1 |= (tdqs & 1) << 11
return mr1
def format_mr2(cwl, rtt_wr):
mr2 = (cwl-5) << 3
mr2 |= rtt_wr << 9
return mr2
z_to_rtt_nom = {
"disabled" : 0,
"60ohm" : 1,
"120ohm" : 2,
"40ohm" : 3,
"20ohm" : 4,
"30ohm" : 5
}
z_to_rtt_wr = {
"disabled" : 0,
"60ohm" : 1,
"120ohm" : 2,
}
z_to_ron = {
"40ohm" : 0,
"34ohm" : 1,
}
# default electrical settings (point to point)
rtt_nom = "60ohm"
rtt_wr = "60ohm"
ron = "34ohm"
tdqs = 0
# override electrical settings if specified
if hasattr(phy_settings, "rtt_nom"):
rtt_nom = phy_settings.rtt_nom
if hasattr(phy_settings, "rtt_wr"):
rtt_wr = phy_settings.rtt_wr
if hasattr(phy_settings, "ron"):
ron = phy_settings.ron
if getattr(phy_settings, "tdqs", False):
tdqs = 1
wr = max(timing_settings.tWTR*phy_settings.nphases, 5) # >= ceiling(tWR/tCK)
mr0 = format_mr0(bl, cl, wr, 1)
mr1 = format_mr1(z_to_ron[ron], z_to_rtt_nom[rtt_nom], tdqs)
mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
mr3 = 0
init_sequence = [
("Release reset", 0x0000, 0, cmds["UNRESET"], 50000),
("Bring CKE high", 0x0000, 0, cmds["CKE"], 10000),
("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
("Load Mode Register 3", mr3, 3, cmds["MODE_REGISTER"], 0),
("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
]
return init_sequence, {1: mr1}
# DDR4 ---------------------------------------------------------------------------------------------
def get_ddr4_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 8
cwl = phy_settings.cwl
def format_mr0(bl, cl, wr, dll_reset):
bl_to_mr0 = {
4: 0b10,
8: 0b00
}
cl_to_mr0 = {
9: 0b00000,
10: 0b00001,
11: 0b00010,
12: 0b00011,
13: 0b00100,
14: 0b00101,
15: 0b00110,
16: 0b00111,
18: 0b01000,
20: 0b01001,
22: 0b01010,
24: 0b01011,
23: 0b01100,
17: 0b01101,
19: 0b01110,
21: 0b01111,
25: 0b10000,
26: 0b10001,
27: 0b10010,
28: 0b10011,
29: 0b10100,
30: 0b10101,
31: 0b10110,
32: 0b10111,
}
wr_to_mr0 = {
10: 0b0000,
12: 0b0001,
14: 0b0010,
16: 0b0011,
18: 0b0100,
20: 0b0101,
24: 0b0110,
22: 0b0111,
26: 0b1000,
28: 0b1001,
}
mr0 = bl_to_mr0[bl]
mr0 |= (cl_to_mr0[cl] & 0b1) << 2
mr0 |= ((cl_to_mr0[cl] >> 1) & 0b111) << 4
mr0 |= ((cl_to_mr0[cl] >> 4) & 0b1) << 12
mr0 |= dll_reset << 8
mr0 |= (wr_to_mr0[wr] & 0b111) << 9
mr0 |= (wr_to_mr0[wr] >> 3) << 13
return mr0
def format_mr1(dll_enable, ron, rtt_nom, tdqs):
mr1 = dll_enable
mr1 |= ((ron >> 0) & 0b1) << 1
mr1 |= ((ron >> 1) & 0b1) << 2
mr1 |= ((rtt_nom >> 0) & 0b1) << 8
mr1 |= ((rtt_nom >> 1) & 0b1) << 9
mr1 |= ((rtt_nom >> 2) & 0b1) << 10
mr1 |= (tdqs & 0b1) << 11
return mr1
def format_mr2(cwl, rtt_wr):
cwl_to_mr2 = {
9: 0b000,
10: 0b001,
11: 0b010,
12: 0b011,
14: 0b100,
16: 0b101,
18: 0b110,
20: 0b111
}
mr2 = cwl_to_mr2[cwl] << 3
mr2 |= rtt_wr << 9
return mr2
def format_mr3(fine_refresh_mode):
fine_refresh_mode_to_mr3 = {
"1x": 0b000,
"2x": 0b001,
"4x": 0b010
}
mr3 = fine_refresh_mode_to_mr3[fine_refresh_mode] << 6
return mr3
def format_mr6(tccd):
tccd_to_mr6 = {
4: 0b000,
5: 0b001,
6: 0b010,
7: 0b011,
8: 0b100
}
mr6 = tccd_to_mr6[tccd] << 10
return mr6
z_to_rtt_nom = {
"disabled" : 0b000,
"60ohm" : 0b001,
"120ohm" : 0b010,
"40ohm" : 0b011,
"240ohm" : 0b100,
"48ohm" : 0b101,
"80ohm" : 0b110,
"34ohm" : 0b111
}
z_to_rtt_wr = {
"disabled" : 0b000,
"120ohm" : 0b001,
"240ohm" : 0b010,
"high-z" : 0b011,
"80ohm" : 0b100,
}
z_to_ron = {
"34ohm" : 0b00,
"48ohm" : 0b01,
}
# default electrical settings (point to point)
rtt_nom = "40ohm"
rtt_wr = "120ohm"
ron = "34ohm"
tdqs = 0
dm = 1
assert not (dm and tdqs)
# override electrical settings if specified
if hasattr(phy_settings, "rtt_nom"):
rtt_nom = phy_settings.rtt_nom
if hasattr(phy_settings, "rtt_wr"):
rtt_wr = phy_settings.rtt_wr
if hasattr(phy_settings, "ron"):
ron = phy_settings.ron
if getattr(phy_settings, "tdqs", False):
tdqs = 1
wr = max(timing_settings.tWTR*phy_settings.nphases, 10) # >= ceiling(tWR/tCK)
mr0 = format_mr0(bl, cl, wr, 1)
mr1 = format_mr1(1, z_to_ron[ron], z_to_rtt_nom[rtt_nom], tdqs)
mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
mr3 = format_mr3(timing_settings.fine_refresh_mode)
mr4 = 0
mr5 = (dm << 10)
mr6 = format_mr6(4) # FIXME: tCCD
rdimm_init = []
if phy_settings.is_rdimm:
def get_coarse_speed(tck, pll_bypass):
# JESD82-31A page 78
f_to_coarse_speed = {
1600e6: 0,
1866e6: 1,
2133e6: 2,
2400e6: 3,
2666e6: 4,
2933e6: 5,
3200e6: 6,
}
if pll_bypass:
return 7
else:
for f, speed in f_to_coarse_speed.items():
if tck >= 2/f:
return speed
raise ValueError
def get_fine_speed(tck):
# JESD82-31A page 83
freq = 2/tck
fine_speed = int((freq - 1240e6) // 20e6)
fine_speed = max(fine_speed, 0)
fine_speed = min(fine_speed, 0b1100001)
return fine_speed
coarse_speed = get_coarse_speed(phy_settings.tck, phy_settings.rcd_pll_bypass)
fine_speed = get_fine_speed(phy_settings.tck)
rcd_reset = 0x060 | 0x0 # F0RC06: command space control; 0: reset RCD
        f0rc0f = 0x0F0 | 0x4                             # F0RC0F: 0 nCK latency adder
f0rc03 = 0x030 | phy_settings.rcd_ca_cs_drive # F0RC03: CA/CS drive strength
f0rc04 = 0x040 | phy_settings.rcd_odt_cke_drive # F0RC04: ODT/CKE drive strength
        f0rc05 = 0x050 | phy_settings.rcd_clk_drive      # F0RC05: CK drive strength
f0rc0a = 0x0A0 | coarse_speed # F0RC0A: coarse speed selection and PLL bypass
f0rc3x = 0x300 | fine_speed # F0RC3x: fine speed selection
rdimm_init = [
("Reset RCD", rcd_reset, 7, cmds["MODE_REGISTER"], 50000),
("Load RCD F0RC0F", f0rc0f, 7, cmds["MODE_REGISTER"], 100),
("Load RCD F0RC03", f0rc03, 7, cmds["MODE_REGISTER"], 100),
("Load RCD F0RC04", f0rc04, 7, cmds["MODE_REGISTER"], 100),
("Load RCD F0RC05", f0rc05, 7, cmds["MODE_REGISTER"], 100),
("Load RCD F0RC0A", f0rc0a, 7, cmds["MODE_REGISTER"], 100),
("Load RCD F0RC3X", f0rc3x, 7, cmds["MODE_REGISTER"], 100),
]
init_sequence = [
("Release reset", 0x0000, 0, cmds["UNRESET"], 50000),
("Bring CKE high", 0x0000, 0, cmds["CKE"], 10000),
] + rdimm_init + [
("Load Mode Register 3", mr3, 3, cmds["MODE_REGISTER"], 0),
("Load Mode Register 6", mr6, 6, cmds["MODE_REGISTER"], 0),
("Load Mode Register 5", mr5, 5, cmds["MODE_REGISTER"], 0),
("Load Mode Register 4", mr4, 4, cmds["MODE_REGISTER"], 0),
("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
]
return init_sequence, {1: mr1}
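# A worked example of the RCD speed-bin arithmetic used above, kept outside the closures so the
# numbers are easy to check (a sketch only, not an extra configuration path): for an assumed
# 2400 MT/s setup, tck = 2/2400e6, so get_coarse_speed() stops at the 2400e6 entry (coarse
# code 3) and the nominal fine code works out to (2400e6 - 1240e6) // 20e6 = 58 for F0RC3x.
def _rcd_fine_speed_sketch(tck):
    freq = 2/tck  # data rate implied by the clock period, as in get_fine_speed()
    return min(max(int((freq - 1240e6) // 20e6), 0), 0b1100001)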
# LPDDR4 -------------------------------------------------------------------------------------------
def get_lpddr4_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
cwl = phy_settings.cwl
bl = 16
dq_odt = getattr(phy_settings, "dq_odt", "RZQ/2")
ca_odt = getattr(phy_settings, "ca_odt", "RZQ/2")
pull_down_drive_strength = getattr(phy_settings, "pull_down_drive_strength", "RZQ/2")
vref_ca_range = getattr(phy_settings, "vref_ca_range", 1)
vref_ca = getattr(phy_settings, "vref_ca", 30.4)
vref_dq_range = getattr(phy_settings, "vref_dq_range", 1)
vref_dq = getattr(phy_settings, "vref_dq", 30.4)
def get_nwr():
frequency_ranges = [ # Table 28. Frequency Ranges for RL, WL, nWR, and nRTP Settings
# RL (DBI) WL (set) nWR nRTP frequency
# w/o w/ A B > <=
[( 6, 6), ( 4, 4), 6, 8, ( 10, 266)],
[(10, 12), ( 6, 8), 10, 8, ( 266, 533)],
[(14, 16), ( 8, 12), 16, 8, ( 533, 800)],
[(20, 22), (10, 18), 20, 8, ( 800, 1066)],
[(24, 28), (12, 22), 24, 10, (1066, 1333)],
[(28, 32), (14, 26), 30, 12, (1333, 1600)],
[(32, 36), (16, 30), 34, 14, (1600, 1866)],
[(36, 40), (18, 34), 40, 16, (1866, 2133)],
]
# We use no DBI and WL set A
for (rl, _), (wl, _), nwr, nrtp, (fmin, fmax) in frequency_ranges:
if rl == cl:
assert wl == cwl, "Wrong (RL, WL) combination"
return nwr
nwr = get_nwr()
odt_map = {
"disable": 0b000,
"RZQ/1": 0b001,
"RZQ/2": 0b010,
"RZQ/3": 0b011,
"RZQ/4": 0b100,
"RZQ/5": 0b101,
"RZQ/6": 0b110,
}
# Table 215: VREF Setting for Range[0] and Range[1] (LPDDR4 1.10V VDDQ)
# vref_ranges[range][percent_vddx]
vref_ranges = {
0: {
10.0: 0b000000, 10.4: 0b000001, 10.8: 0b000010, 11.2: 0b000011, 11.6: 0b000100,
12.0: 0b000101, 12.4: 0b000110, 12.8: 0b000111, 13.2: 0b001000, 13.6: 0b001001,
14.0: 0b001010, 14.4: 0b001011, 14.8: 0b001100, 15.2: 0b001101, 15.6: 0b001110,
16.0: 0b001111, 16.4: 0b010000, 16.8: 0b010001, 17.2: 0b010010, 17.6: 0b010011,
18.0: 0b010100, 18.4: 0b010101, 18.8: 0b010110, 19.2: 0b010111, 19.6: 0b011000,
20.0: 0b011001, 20.4: 0b011010, 20.8: 0b011011, 21.2: 0b011100, 21.6: 0b011101,
22.0: 0b011110, 22.4: 0b011111, 22.8: 0b100000, 23.2: 0b100001, 23.6: 0b100010,
24.0: 0b100011, 24.4: 0b100100, 24.8: 0b100101, 25.2: 0b100110, 25.6: 0b100111,
26.0: 0b101000, 26.4: 0b101001, 26.8: 0b101010, 27.2: 0b101011, 27.6: 0b101100,
28.0: 0b101101, 28.4: 0b101110, 28.8: 0b101111, 29.2: 0b110000, 29.6: 0b110001,
30.0: 0b110010,
},
1: {
22.0: 0b000000, 22.4: 0b000001, 22.8: 0b000010, 23.2: 0b000011, 23.6: 0b000100,
24.0: 0b000101, 24.4: 0b000110, 24.8: 0b000111, 25.2: 0b001000, 25.6: 0b001001,
26.0: 0b001010, 26.4: 0b001011, 26.8: 0b001100, 27.2: 0b001101, 27.6: 0b001110,
28.0: 0b001111, 28.4: 0b010000, 28.8: 0b010001, 29.2: 0b010010, 29.6: 0b010011,
30.0: 0b010100, 30.4: 0b010101, 30.8: 0b010110, 31.2: 0b010111, 31.6: 0b011000,
32.0: 0b011001, 32.4: 0b011010, 32.8: 0b011011, 33.2: 0b011100, 33.6: 0b011101,
34.0: 0b011110, 34.4: 0b011111, 34.8: 0b100000, 35.2: 0b100001, 35.6: 0b100010,
36.0: 0b100011, 36.4: 0b100100, 36.8: 0b100101, 37.2: 0b100110, 37.6: 0b100111,
38.0: 0b101000, 38.4: 0b101001, 38.8: 0b101010, 39.2: 0b101011, 39.6: 0b101100,
40.0: 0b101101, 40.4: 0b101110, 40.8: 0b101111, 41.2: 0b110000, 41.6: 0b110001,
42.0: 0b110010,
},
}
def reg(fields):
regval = 0
written = 0
for shift, width, val in fields:
mask = (2**width - 1) << shift
assert written & mask == 0, "Would overwrite another field, xor=0b{:032b}".format(mask ^ written)
assert val < 2**width, "Value larger than field width: val={}, width={}".format(val, width)
regval |= (val << shift) & mask
written |= mask
return regval
mr = {}
mr[1] = reg([
(0, 2, {16: 0b00, 32: 0b01, "on-the-fly": 0b10}[bl]),
(2, 1, 1), # 2tCK WR preamble
(3, 1, 0), # static RD preamble
(4, 3, {
6: 0b000,
10: 0b001,
16: 0b010,
20: 0b011,
24: 0b100,
30: 0b101,
34: 0b110,
40: 0b111,
}[nwr]),
(7, 1, 0), # 0.5tCK RD postamble
])
mr[2] = reg([
(0, 3, { # RL assuming DBI-RD disabled
6: 0b000,
10: 0b001,
14: 0b010,
20: 0b011,
24: 0b100,
28: 0b101,
32: 0b110,
36: 0b111,
}[cl]),
(3, 3, { # WL, set A
4: 0b000,
6: 0b001,
8: 0b010,
10: 0b011,
12: 0b100,
14: 0b101,
16: 0b110,
18: 0b111,
}[cwl]),
(6, 1, 0), # use set A
(7, 1, 0), # write leveling disabled
])
mr[3] = reg([ # defaults
(0, 1, 1),
(1, 1, 0),
(2, 1, 0),
(3, 3, odt_map[pull_down_drive_strength]),
(6, 1, 0),
(7, 1, 0),
])
mr[11] = reg([
(0, 3, odt_map[dq_odt]),
(4, 3, odt_map[ca_odt]),
])
mr[12] = reg([
(0, 6, vref_ranges[vref_ca_range][vref_ca]), # Vref(CA) % of VDD2
(6, 1, vref_ca_range),
])
mr[14] = reg([
(0, 6, vref_ranges[vref_dq_range][vref_dq]), # Vref(DQ) % of VDDQ
(6, 1, vref_dq_range),
])
mr[13] = 0 # defaults (data mask enabled, frequency set point 0)
from litedram.phy.lpddr4.commands import SpecialCmd, MPC
def cmd_mr(ma):
# Convert Mode Register Write command to DFI as expected by PHY
op = mr[ma]
        assert ma < 2**6, "MR address too big: {}".format(ma)
        assert op < 2**8, "MR opcode too big: {}".format(op)
a = op
ba = ma
return ("Load More Register {}".format(ma), a, ba, cmds["MODE_REGISTER"], 200)
def ck(sec):
# FIXME: use sys_clk_freq (should be added e.g. to TimingSettings), using arbitrary value for now
fmax = 200e6
return int(math.ceil(sec * fmax))
init_sequence = [
# Perform "Reset Initialization with Stable Power"
        # We assume that loading the bitstream will take at least tINIT1 (200us)
# Because LiteDRAM will start with reset_n=1 during hw control, first reset the chip (for tPW_RESET)
("Assert reset", 0x0000, 0, "DFII_CONTROL_ODT", ck(100e-9)),
("Release reset", 0x0000, 0, cmds["UNRESET"], ck(2e-3)),
("Bring CKE high", 0x0000, 0, cmds["CKE"], ck(2e-6)),
*[cmd_mr(ma) for ma in sorted(mr.keys())],
("ZQ Calibration start", MPC.ZQC_START, SpecialCmd.MPC, "DFII_COMMAND_WE|DFII_COMMAND_CS", ck(1e-6)),
("ZQ Calibration latch", MPC.ZQC_LATCH, SpecialCmd.MPC, "DFII_COMMAND_WE|DFII_COMMAND_CS", max(8, ck(30e-9))),
]
return init_sequence, mr
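# The nested reg() helper above packs (shift, width, value) tuples into a mode register word and
# asserts that no two fields overlap. The standalone sketch below repeats only the packing step
# to make the bit layout explicit: for MR2, an RL code of 0b001 at shift 0 combined with a WL
# code of 0b010 at shift 3 gives 0b010001 (0x11). Field values here are illustrative.
def _pack_fields_sketch(fields):
    value = 0
    for shift, width, field in fields:
        value |= (field & (2**width - 1)) << shift  # mask to the field width, then place it
    return value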
# Init Sequence ------------------------------------------------------------------------------------
def get_sdram_phy_init_sequence(phy_settings, timing_settings):
return {
"SDR": get_sdr_phy_init_sequence,
"DDR": get_ddr_phy_init_sequence,
"LPDDR": get_lpddr_phy_init_sequence,
"DDR2": get_ddr2_phy_init_sequence,
"DDR3": get_ddr3_phy_init_sequence,
"DDR4": get_ddr4_phy_init_sequence,
"LPDDR4": get_lpddr4_phy_init_sequence,
}[phy_settings.memtype](phy_settings, timing_settings)
# C Header -----------------------------------------------------------------------------------------
class CGenerator(list):
# C code generator - list of strings (=lines) or CGenerator instances (sub-generators)
def __init__(self, indent=0, indent_str="\t"):
self.indent = indent
self.indent_str = indent_str
def __iadd__(self, x):
# make `c += "int x = 0;"` append it as line, not char-by-char
if isinstance(x, str):
x = [x]
return super().__iadd__(x)
def header_guard(self, name):
self._header_guard = name
def generate_lines(self):
if getattr(self, "_header_guard", None) is not None:
self.insert(0, f"#ifndef {self._header_guard}")
self.insert(1, f"#define {self._header_guard}")
self.insert(2, "")
self.append("")
self.append(f"#endif /* {self._header_guard} */")
self._header_guard = None
lines = []
for entry in self:
if isinstance(entry, CGenerator):
lines.extend(entry.generate_lines())
else:
line = (self.indent * self.indent_str) + entry
lines.append(line.rstrip())
return lines
def generate(self):
lines = self.generate_lines()
return "\n".join(lines).strip() + "\n"
def include(self, path):
self.append(f"#include {path}")
def define(self, var, value=None):
if isinstance(value, (int, float)):
value = str(value)
self.append(f"#define {var}" + (f" {value}" if value is not None else ""))
def newline(self, n=1):
self.extend([""] * n)
@contextmanager
def block(self, head=None, newline=True):
        if head is not None:
            # with newline=False the opening brace stays on the head line (e.g. "switch (phase) {")
            self.append(head + (" {" if not newline else ""))
        if newline or head is None:
            self.append("{")
subgenerator = CGenerator(indent=self.indent + 1, indent_str=self.indent_str)
yield subgenerator
self.append(subgenerator)
self.append("}")
def get_sdram_phy_c_header(phy_settings, timing_settings):
r = CGenerator()
r.header_guard("__GENERATED_SDRAM_PHY_H")
r.include("<hw/common.h>")
r.include("<generated/csr.h>")
r.newline()
r.define("DFII_CONTROL_SEL", "0x01")
r.define("DFII_CONTROL_CKE", "0x02")
r.define("DFII_CONTROL_ODT", "0x04")
r.define("DFII_CONTROL_RESET_N", "0x08")
r.newline()
r.define("DFII_COMMAND_CS", "0x01")
r.define("DFII_COMMAND_WE", "0x02")
r.define("DFII_COMMAND_CAS", "0x04")
r.define("DFII_COMMAND_RAS", "0x08")
r.define("DFII_COMMAND_WRDATA", "0x10")
r.define("DFII_COMMAND_RDDATA", "0x20")
r.newline()
phytype = phy_settings.phytype.upper()
nphases = phy_settings.nphases
# Define PHY type and number of phases
r.define(f"SDRAM_PHY_{phytype}")
r.define("SDRAM_PHY_XDR", 1 if phy_settings.memtype == "SDR" else 2)
r.define("SDRAM_PHY_DATABITS", phy_settings.databits)
r.define("SDRAM_PHY_DFI_DATABITS", phy_settings.dfi_databits)
r.define("SDRAM_PHY_PHASES", nphases)
for setting in ["cl", "cwl", "cmd_latency", "cmd_delay"]:
if getattr(phy_settings, setting, None) is not None:
r.define(f"SDRAM_PHY_{setting.upper()}", getattr(phy_settings, setting))
    # Define PHY Read/Write phases
rdphase = phy_settings.rdphase
if isinstance(rdphase, Signal): rdphase = rdphase.reset.value
r.define("SDRAM_PHY_RDPHASE", rdphase)
wrphase = phy_settings.wrphase
if isinstance(wrphase, Signal): wrphase = wrphase.reset.value
r.define("SDRAM_PHY_WRPHASE", wrphase)
# Define Read/Write Leveling capability
if phy_settings.write_leveling:
r.define("SDRAM_PHY_WRITE_LEVELING_CAPABLE")
if phy_settings.write_latency_calibration:
r.define("SDRAM_PHY_WRITE_LATENCY_CALIBRATION_CAPABLE")
if phy_settings.write_dq_dqs_training:
r.define("SDRAM_PHY_WRITE_DQ_DQS_TRAINING_CAPABLE")
if phy_settings.read_leveling:
r.define("SDRAM_PHY_READ_LEVELING_CAPABLE")
# Define number of modules/delays/bitslips
r.define("SDRAM_PHY_MODULES", "(SDRAM_PHY_DATABITS/8)")
if phy_settings.delays > 0:
r.define("SDRAM_PHY_DELAYS", phy_settings.delays)
if phy_settings.bitslips > 0:
r.define("SDRAM_PHY_BITSLIPS", phy_settings.bitslips)
if phy_settings.is_rdimm:
assert phy_settings.memtype == "DDR4"
r.define("SDRAM_PHY_DDR4_RDIMM")
r.newline()
r += "void cdelay(int i);"
r.newline()
# Commands functions
for n in range(nphases):
with r.block(f"__attribute__((unused)) static inline void command_p{n}(int cmd)") as b:
b += f"sdram_dfii_pi{n}_command_write(cmd);"
b += f"sdram_dfii_pi{n}_command_issue_write(1);"
r.newline()
# Write/Read functions
r.define("DFII_PIX_DATA_SIZE", "CSR_SDRAM_DFII_PI0_WRDATA_SIZE")
r.newline()
for data in ["wrdata", "rddata"]:
with r.block(f"static inline unsigned long sdram_dfii_pix_{data}_addr(int phase)") as b:
with b.block("switch (phase)", newline=False) as s:
for n in range(nphases):
s += f"case {n}: return CSR_SDRAM_DFII_PI{n}_{data.upper()}_ADDR;"
s += "default: return 0;"
r.newline()
init_sequence, mr = get_sdram_phy_init_sequence(phy_settings, timing_settings)
if phy_settings.memtype in ["DDR3", "DDR4"]:
# The value of MR1[7] needs to be modified during write leveling
r.define("DDRX_MR_WRLVL_ADDRESS", 1)
r.define("DDRX_MR_WRLVL_RESET", mr[1])
r.define("DDRX_MR_WRLVL_BIT", 7)
r.newline()
elif phy_settings.memtype in ["LPDDR4"]:
# Write leveling enabled by MR2[7]
r.define("DDRX_MR_WRLVL_ADDRESS", 2)
r.define("DDRX_MR_WRLVL_RESET", mr[2])
r.define("DDRX_MR_WRLVL_BIT", 7)
r.newline()
with r.block("static inline void init_sequence(void)") as b:
for comment, a, ba, cmd, delay in init_sequence:
invert_masks = [(0, 0), ]
if phy_settings.is_rdimm:
assert phy_settings.memtype == "DDR4"
# JESD82-31A page 38
#
# B-side chips have certain usually-inconsequential address and BA
# bits inverted by the RCD to reduce SSO current. For mode register
# writes, however, we must compensate for this. BG[1] also directs
# writes either to the A side (BG[1]=0) or B side (BG[1]=1)
#
# The 'ba != 7' is because we don't do this to writes to the RCD
# itself.
if ba != 7:
invert_masks.append((0b10101111111000, 0b1111))
for a_inv, ba_inv in invert_masks:
b += f"/* {comment} */"
b += f"sdram_dfii_pi0_address_write({a ^ a_inv:#x});"
b += f"sdram_dfii_pi0_baddress_write({ba ^ ba_inv:d});"
if cmd.startswith("DFII_CONTROL"):
b += f"sdram_dfii_control_write({cmd});"
else:
b += f"command_p0({cmd});"
if delay:
b += f"cdelay({delay});\n"
b.newline()
return r.generate()
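# For RDIMM DDR4 the loop above emits most mode-register writes twice: once unmodified for
# A-side devices and once with the JESD82-31A address/BA mirroring for B-side devices (writes to
# the RCD itself, ba == 7, are left alone). A quick numeric illustration with arbitrary values:
# a = 0x0400, ba = 0 becomes a ^ 0b10101111111000 = 0x2ff8 and ba ^ 0b1111 = 0b1111 on the
# second pass.
def _bside_invert_sketch(a, ba):
    return a ^ 0b10101111111000, ba ^ 0b1111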
# Python Header ------------------------------------------------------------------------------------
def get_sdram_phy_py_header(phy_settings, timing_settings):
r = ""
r += "dfii_control_sel = 0x01\n"
r += "dfii_control_cke = 0x02\n"
r += "dfii_control_odt = 0x04\n"
r += "dfii_control_reset_n = 0x08\n"
r += "\n"
r += "dfii_command_cs = 0x01\n"
r += "dfii_command_we = 0x02\n"
r += "dfii_command_cas = 0x04\n"
r += "dfii_command_ras = 0x08\n"
r += "dfii_command_wrdata = 0x10\n"
r += "dfii_command_rddata = 0x20\n"
r += "\n"
init_sequence, mr = get_sdram_phy_init_sequence(phy_settings, timing_settings)
if mr is not None and 1 in mr:
r += "ddrx_mr1 = 0x{:x}\n".format(mr[1])
r += "\n"
r += "init_sequence = [\n"
for comment, a, ba, cmd, delay in init_sequence:
r += f" (\"{comment}\", {a}, {ba}, {cmd.lower()}, {delay}),\n"
r += "]\n"
return r
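# The string returned above is plain Python source: it defines the dfii_* constants, optionally
# ddrx_mr1, and an init_sequence list whose command fields reference those constants. One way a
# consumer could load it (the namespace handling here is an assumption about the caller, not
# something this module requires):
def _load_py_header_sketch(header_source):
    namespace = {}
    exec(header_source, namespace)
    return namespace["init_sequence"]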
| 36.37212
| 125
| 0.530645
|
ec5ce2b9edf8897bf42395399df660457cf91354
| 1,197
|
py
|
Python
|
test/test_phones.py
|
alkava/python_traning
|
ce7334572cea7de08de8951b240e506ea9cd87a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_phones.py
|
alkava/python_traning
|
ce7334572cea7de08de8951b240e506ea9cd87a7
|
[
"Apache-2.0"
] | null | null | null |
test/test_phones.py
|
alkava/python_traning
|
ce7334572cea7de08de8951b240e506ea9cd87a7
|
[
"Apache-2.0"
] | null | null | null |
import re
def test_phones_on_home_page(app):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
def test_phones_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.homephone == contact_from_edit_page.homephone
assert contact_from_view_page.workphone == contact_from_edit_page.workphone
assert contact_from_view_page.mobilephone == contact_from_edit_page.mobilephone
assert contact_from_view_page.secondaryphone == contact_from_edit_page.secondaryphone
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.homephone, contact.mobilephone, contact.workphone, contact.secondaryphone]))))
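# A quick illustration of the helpers above with a made-up contact object (not a fixture from
# this suite): clear() strips "() -" separators and merge_phones_like_on_home_page() joins the
# remaining non-empty numbers with newlines, matching the home page layout.
def _merge_phones_example():
    class FakeContact:
        homephone = "+1 (111) 222-333"
        mobilephone = ""
        workphone = None
        secondaryphone = "44 55"
    return merge_phones_like_on_home_page(FakeContact())  # -> "+1111222333\n4455"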
| 44.333333
| 120
| 0.751044
|
13e900db384a36c96161a1f4a3e26e29ca257abf
| 328
|
py
|
Python
|
floppyforms/__init__.py
|
jonashaag/django-floppyforms
|
eec8a0c1902e2bcdbf1bc05f0f5ff5403cae7afd
|
[
"BSD-3-Clause"
] | null | null | null |
floppyforms/__init__.py
|
jonashaag/django-floppyforms
|
eec8a0c1902e2bcdbf1bc05f0f5ff5403cae7afd
|
[
"BSD-3-Clause"
] | null | null | null |
floppyforms/__init__.py
|
jonashaag/django-floppyforms
|
eec8a0c1902e2bcdbf1bc05f0f5ff5403cae7afd
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
from django.forms import (BaseModelForm, model_to_dict, fields_for_model,
save_instance, ValidationError, Media,
MediaDefiningClass)
from .fields import *
from .forms import *
from .models import *
from .widgets import *
from . import gis
__version__ = '1.0'
| 25.230769
| 73
| 0.652439
|
8e4b7ff667d963e203fa625799aadab2eab7c0c8
| 946
|
py
|
Python
|
odziez/clothes/forms.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
odziez/clothes/forms.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
odziez/clothes/forms.py
|
szymanskirafal/odziez
|
029d20da0474a0380e8383f9f89c1072666c5399
|
[
"MIT"
] | null | null | null |
from django.forms import HiddenInput, ModelForm
from .models import Clothe
class ClotheCreateForm(ModelForm):
class Meta:
model = Clothe
fields = [
'ordered',
'received',
'destroyed',
]
widgets = {
'ordered': HiddenInput,
'received': HiddenInput,
'destroyed': HiddenInput,
}
class ClotheDeliveredForm(ModelForm):
class Meta:
model = Clothe
fields = [
'ordered',
'received',
'delivered_ok',
'delivered_with_defects',
'not_delivered',
'in_use',
]
widgets = {
'ordered': HiddenInput,
'received': HiddenInput,
'delivered_ok': HiddenInput,
'delivered_with_defects': HiddenInput,
'not_delivered': HiddenInput,
'in_use': HiddenInput,
}
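# Minimal usage sketch (not wired to any view in this app; the POST data is a placeholder):
# bind one of the hidden-widget forms to submitted data and save the Clothe if it validates.
def example_create_clothe(post_data):
    form = ClotheCreateForm(data=post_data)
    if form.is_valid():
        return form.save()
    return None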
| 23.65
| 50
| 0.502114
|
6120674715a508a97f53ed3389f32ae4ce2da505
| 319
|
py
|
Python
|
medtech_bpa/medtech_bpa/custom_scripts/delivery_note/delivery_note.py
|
sds2402/MedTech-BPA-1
|
9b159cb619d363cc89678365642f9af6fd9f59b5
|
[
"MIT"
] | 1
|
2021-03-25T12:51:19.000Z
|
2021-03-25T12:51:19.000Z
|
medtech_bpa/medtech_bpa/custom_scripts/delivery_note/delivery_note.py
|
sds2402/MedTech-BPA-1
|
9b159cb619d363cc89678365642f9af6fd9f59b5
|
[
"MIT"
] | 1
|
2021-11-08T07:20:32.000Z
|
2021-11-08T07:20:32.000Z
|
medtech_bpa/medtech_bpa/custom_scripts/delivery_note/delivery_note.py
|
sds2402/MedTech-BPA-1
|
9b159cb619d363cc89678365642f9af6fd9f59b5
|
[
"MIT"
] | 9
|
2021-01-04T10:21:57.000Z
|
2021-12-08T12:44:48.000Z
|
from __future__ import unicode_literals
import frappe
def validate(doc, method):
so_name = [row.against_sales_order for row in doc.items if row.against_sales_order]
if so_name:
		so_doc = frappe.get_doc("Sales Order", so_name[0])
so_doc.workflow_state = "Pending Dispatch"
so_doc.db_update()
frappe.db.commit()
| 29
| 84
| 0.774295
|
15a0cc1d9daaac15254628060f71077f94ff198a
| 2,864
|
py
|
Python
|
var/spack/repos/builtin/packages/spdlog/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2020-08-13T15:24:33.000Z
|
2021-10-18T18:38:19.000Z
|
var/spack/repos/builtin/packages/spdlog/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-02-26T11:44:34.000Z
|
2022-03-12T12:14:50.000Z
|
var/spack/repos/builtin/packages/spdlog/package.py
|
carlabguillen/spack
|
7070bb892f9bdb5cf9e76e0eecd64f6cc5f4695c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-01-22T14:01:28.000Z
|
2020-07-23T21:35:12.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spdlog(CMakePackage):
"""Very fast, header only, C++ logging library"""
homepage = "https://github.com/gabime/spdlog"
url = "https://github.com/gabime/spdlog/archive/v0.9.0.tar.gz"
version('1.5.0', sha256='b38e0bbef7faac2b82fed550a0c19b0d4e7f6737d5321d4fd8f216b80f8aee8a')
version('1.4.2', sha256='821c85b120ad15d87ca2bc44185fa9091409777c756029125a02f81354072157')
version('1.4.1', sha256='3291958eb54ed942d1bd3aef1b4f8ccf70566cbc04d34296ec61eb96ceb73cff')
version('1.2.1', sha256='867a4b7cedf9805e6f76d3ca41889679054f7e5a3b67722fe6d0eae41852a767')
version('1.2.0', sha256='0ba31b9e7f8e43a7be328ab0236d57810e5d4fc8a1a7842df665ae22d5cbd128')
version('1.1.0', sha256='3dbcbfd8c07e25f5e0d662b194d3a7772ef214358c49ada23c044c4747ce8b19')
version('1.0.0', sha256='90d5365121bcd2c41ce94dfe6a460e89507a2dfef6133fe5fad5bb35ac4ef0a1')
version('0.17.0', sha256='94f74fd1b3344733d1db3de2ec22e6cbeb769f93a8baa0d4a22b1f62dc7369f8')
version('0.16.3', sha256='b88d7be261d9089c817fc8cee6c000d69f349b357828e4c7f66985bc5d5360b8')
version('0.16.2', sha256='2081e5df5e87402398847431e16b87c71dd5c4d632314bb976ace8161f4d32de')
version('0.16.1', sha256='733260e1fbdcf1b3dc307fc585e4476240026de8be28eb905731d2ab0942deae')
version('0.16.0', sha256='9e64e3b10c2a3c54dfff63aa056057cf1db8a5fd506b3d9cf77207511820baac')
version('0.14.0', sha256='eb5beb4e53f4bfff5b32eb4db8588484bdc15a17b90eeefef3a9fc74fec1d83d')
version('0.13.0', sha256='d798a6ca19165f0a18a43938859359269f5a07fd8e0eb83ab8674739c9e8f361')
version('0.12.0', sha256='5cfd6a0b3182a88e1eb35bcb65a7ef9035140d7c73b16ba6095939dbf07325b9')
version('0.11.0', sha256='8c0f1810fb6b7d23fef70c2ea8b6fa6768ac8d18d6e0de39be1f48865e22916e')
version('0.10.0', sha256='fbbc53c1cc09b93b4c3d76b683bbe9315e2efe3727701227374dce6aa4264075')
version('0.9.0', sha256='bbbe5a855c8b309621352921d650449eb2f741d35d55ec50fb4d8122ddfb8f01')
variant('shared', default=True,
description='Build shared libraries (v1.4.0+)')
depends_on('cmake@3.2:', type='build')
def cmake_args(self):
spec = self.spec
args = []
if self.spec.version >= Version('1.4.0'):
args.extend([
'-DSPDLOG_BUILD_SHARED:BOOL={0}'.format(
'ON' if '+shared' in spec else 'OFF'),
# tests and examples
'-DSPDLOG_BUILD_TESTS:BOOL={0}'.format(
'ON' if self.run_tests else 'OFF'),
'-DSPDLOG_BUILD_EXAMPLE:BOOL={0}'.format(
'ON' if self.run_tests else 'OFF')
])
return args
| 51.142857
| 96
| 0.732891
|
451048f5741ef34db7dcfa4ec2f7767d50ed6226
| 30,231
|
py
|
Python
|
src/regressor_bank.py
|
Paratra/IoTAnalytics_pub
|
8c1d02b60ef609c3cba654ce4a5568c39fc63edf
|
[
"MIT"
] | null | null | null |
src/regressor_bank.py
|
Paratra/IoTAnalytics_pub
|
8c1d02b60ef609c3cba654ce4a5568c39fc63edf
|
[
"MIT"
] | null | null | null |
src/regressor_bank.py
|
Paratra/IoTAnalytics_pub
|
8c1d02b60ef609c3cba654ce4a5568c39fc63edf
|
[
"MIT"
] | 1
|
2021-09-01T13:10:31.000Z
|
2021-09-01T13:10:31.000Z
|
'''
author: ming
ming.song.cn@outlook.com
copyright@2020
'''
import os
from pdb import Pdb
import sys
import numpy as np
import torch
from torch.optim import *
from torch import nn, optim, cuda
from torch.utils.data import Dataset, DataLoader
# from torch.utils.data import
from sklearn import preprocessing
import copy
from random import sample
from math import isnan
import datetime
import pickle
from scgkit2.signal.signal_distort import signal_distort
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from pdb import set_trace as st
import warnings
warnings.filterwarnings("ignore")
# batch_size = 1000
# test_only = False
# VISUAL_FLAG = False
# # test_only = bool(sys.argv[1])
# lr = 0.001
# dim_feature = 100
def get_size(input_shape,k_size,max_pool_k_size, layers):
for i in range(layers):
if i == 0:
size = int((input_shape - k_size + 1)/max_pool_k_size)
elif i == layers-1:
size = int((size - k_size + 1))
else:
size = int((size - k_size + 1)/max_pool_k_size)
return size
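# get_size() mirrors the shape bookkeeping of CNN_Net below: every layer applies a VALID
# convolution (length - k_size + 1) and every layer except the last is followed by max pooling.
# Worked example with assumed numbers: input_shape=1000, k_size=5, max_pool_k_size=3, layers=3
# gives int(996/3)=332, then int(328/3)=109, then 109-5+1=105 features per channel.
def _get_size_example():
    return get_size(input_shape=1000, k_size=5, max_pool_k_size=3, layers=3)  # -> 105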
class Initial_Dataset(Dataset):
"""docstring for ."""
    def __init__(self, X, Y):  # X: rows of input features, Y: matching rows of targets
self.array_Tx = X
self.array_Ty = Y
def __getitem__(self, index):
data_ = self.array_Tx[index, :]
gt_ = self.array_Ty[index, :] #
return data_, gt_
def __len__(self):
return self.array_Tx.shape[0]
#
# class CNN_LSTM_Net(nn.Module):
# """docstring for CNN_LSTM_Net."""
#
# def __init__(self, LOG=False):
# super(CNN_LSTM_Net, self).__init__()
# #### define layers
#
# ## CNN part
# self.conv1 = nn.Conv1d(in_channels=1, out_channels=128, kernel_size=3)
# self.conv2 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=7)
# self.conv3 = nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5)
# self.batch_norm1d_1 = nn.BatchNorm1d(128)
# self.batch_norm1d_2 = nn.BatchNorm1d(256)
# self.batch_norm1d_3 = nn.BatchNorm1d(64)
#
# self.max_pool1d = nn.MaxPool1d(kernel_size=2)
# self.prelu = nn.PReLU()
# self.dropout = nn.Dropout(p=0.5)
#
# ## LSTM part
# self.lstm = nn.LSTM(input_size=746, hidden_size=128, batch_first=True, num_layers=1)
# self.decoding_layer = nn.Linear(128, 4)
#
#
#
# def forward(self, x):
# # import pdb; pdb.set_trace()
# conv1 = self.conv1(x)
# conv1 = self.batch_norm1d_1(conv1)
# conv1 = self.prelu(conv1)
# conv1 = self.dropout(conv1)
# conv1 = self.max_pool1d(conv1)
#
# conv2 = self.conv2(conv1)
# conv2 = self.batch_norm1d_2(conv2)
# conv2 = self.prelu(conv2)
# conv2 = self.dropout(conv2)
# conv2 = self.max_pool1d(conv2)
#
# out, (hid, c) = self.lstm(conv2)
# pred = self.decoding_layer(hid[0])
#
# return pred
class LSTM(nn.Module):
def __init__(self):
super().__init__()
self.lstm = nn.LSTM(input_size=3000, hidden_size=128, batch_first=True, num_layers=3)
self.decoding_layer = nn.Linear(128, 4)
#
def forward(self, input_seq):
out, (hid, c) = self.lstm(input_seq)
pred = self.decoding_layer(hid[0])
return pred
class LstmAttentionNet(nn.Module):
def __init__(self, num_layers, hidden_size, output_features):
super(LstmAttentionNet, self).__init__()
# hidden_size = 100
attention_size = hidden_size
self.lstm = nn.LSTM(input_size=1, hidden_size=hidden_size, batch_first=True, num_layers=num_layers)
self.w_omega = nn.Parameter(torch.randn(hidden_size,attention_size))
self.b_omega = nn.Parameter(torch.randn(attention_size))
self.u_omega = nn.Parameter(torch.randn(attention_size,1))
self.decoding_layer = nn.Linear(hidden_size, output_features)
def forward(self, x):
# import pdb; pdb.set_trace()
x = x.unsqueeze(2)
out, (h, c) = self.lstm(x)
v = torch.matmul(out,self.w_omega)+self.b_omega
vu = torch.matmul(v, self.u_omega)
weight= nn.functional.softmax(vu,dim=1)
out_weighted = torch.sum(out*weight,1)
y_pred = self.decoding_layer(out_weighted)
return y_pred#, weight
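# Shape walk-through for LstmAttentionNet.forward() with small assumed sizes (batch=4, seq=50,
# hidden=100, 2 outputs), written as a standalone check rather than part of any training run:
# x is unsqueezed to (4, 50, 1), the LSTM returns (4, 50, 100), the attention weights have shape
# (4, 50, 1), and the weighted sum plus decoding layer yields (4, 2).
def _attention_shape_check():
    net = LstmAttentionNet(num_layers=1, hidden_size=100, output_features=2)
    x = torch.randn(4, 50)
    return net(x).shape  # torch.Size([4, 2])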
class CNN_Net(nn.Module):
"""docstring for CNN_Net."""
def __init__(self, input_shape, layers, output_features, out_channels, kernel_size):
super(CNN_Net, self).__init__()
#### define layers
assert len(out_channels) == layers
self.layers = layers
self.out_channels = out_channels
# ## CNN part
self.net = nn.ModuleList()
self.batch_norm = nn.ModuleList()
for i in range(layers):
if i == 0:
self.net.append( nn.Conv1d(in_channels=1, out_channels=out_channels[i], kernel_size=kernel_size) )
else:
self.net.append( nn.Conv1d(in_channels=out_channels[i-1], out_channels=out_channels[i], kernel_size=kernel_size) )
self.batch_norm.append( nn.BatchNorm1d(out_channels[i]) )
# , nn.BatchNorm1d(out_channels[i])
# self.conv1 = nn.Conv1d(in_channels=1, out_channels=128, kernel_size=5)
# self.conv2 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=5)
# self.conv3 = nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5)
# self.batch_norm1d_1 = nn.BatchNorm1d(128)
# self.batch_norm1d_2 = nn.BatchNorm1d(256)
# self.batch_norm1d_3 = nn.BatchNorm1d(64)
self.max_pool1d = nn.MaxPool1d(kernel_size=3)
self.prelu = nn.PReLU()
self.dropout = nn.Dropout(p=0.5)
## LSTM part
# self.lstm = nn.LSTM(input_size=3000, hidden_size=64, batch_first=True, num_layers=1)
# self.decoding_layer1 = nn.Linear(self.flatten_size, 128)
# st()
# flatten_size =
flatten_size = get_size(input_shape=input_shape,k_size=kernel_size,max_pool_k_size=3, layers=layers )
self.decoding_layer1 = nn.Linear(flatten_size*out_channels[-1], 128)
self.decoding_layer2 = nn.Linear(128, output_features)
self.flatten = nn.Flatten()
def forward(self, x):
# import pdb; pdb.set_trace()
x = torch.unsqueeze(x, 1)
for i in range(self.layers):
if i == self.layers - 1:
x = self.net[i](x)
else:
# # st()
# self.net[i]()
x = self.net[i](x)
# st()
x = self.batch_norm[i](x)
x = torch.relu(x)
x = self.dropout(x)
x = self.max_pool1d(x)
# flatten_size = x.shape[1] * x.shape[2]
# flatten = self.flatten(x)
# self.decoding_layer1 = nn.Linear(flatten_size, 128)
# st()
flatten = self.flatten(x)
decode1 = self.decoding_layer1(flatten)
pred = self.decoding_layer2(decode1)
# st()
return pred
class AE_Net(nn.Module):
"""docstring for AE_Net."""
def __init__(self, input_shape):
super(AE_Net, self).__init__()
self.encoder_hidden_layer = nn.Linear(
in_features=input_shape, out_features=128
)
self.encoder_output_layer = nn.Linear(
in_features=128, out_features=64
)
self.decoder_hidden_layer = nn.Linear(
in_features=64, out_features=128
)
self.decoder_output_layer = nn.Linear(
in_features=128, out_features=input_shape
)
def forward(self, features):
activation = self.encoder_hidden_layer(features)
activation = torch.relu(activation)
state_logit = self.encoder_output_layer(activation)
# import pdb; pdb.set_trace()
code = torch.relu(state_logit)
activation = self.decoder_hidden_layer(code)
activation = torch.relu(activation)
activation = self.decoder_output_layer(activation)
reconstructed = activation
# reconstructed = torch.relu(activation)
# import pdb; pdb.set_trace()
return reconstructed, state_logit
class FCN_Net(nn.Module):
"""docstring for FCN_Net."""
def __init__(self, input_features, output_features, layers, neurons):
super(FCN_Net, self).__init__()
#### define layers
# self.net = []
self.net = nn.ModuleList()
for i in range(layers):
            if i == 0:
                self.net.append(nn.Linear(in_features=input_features, out_features=neurons))
            elif i == layers - 1:
                # elif so the first layer is not followed by an extra hidden layer
                # (assumes layers >= 2 so the last layer maps neurons -> output_features)
                self.net.append(nn.Linear(in_features=neurons, out_features=output_features))
            else:
                self.net.append(nn.Linear(in_features=neurons, out_features=neurons))
# self.dropout = nn.Dropout(p=0.5)
self.lrelu = nn.LeakyReLU()
def forward(self, x):
# import pdb; pdb.set_trace()
for ind, each_layer in enumerate(self.net):
if ind == len(self.net)-1:
pred = each_layer(x)
else:
x = each_layer(x)
x = torch.relu(x)
return pred
class FCN_Model():
"""docstring for FCN_Model."""
def __init__(self, input_features=1000, output_features=1, layers=6, neurons=20, learning_rate=0.001, batch_size=32, epoch_number=500):
super(FCN_Model, self).__init__()
####
self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
self.learning_rate = learning_rate
self.batch_size = batch_size
self.epoch_number = epoch_number
# self.ae_Net = AE_Net(input_shape=input_shape)
self.reg_Net = FCN_Net(input_features=input_features, output_features=output_features, layers=layers, neurons=neurons)
# self.reg_Net = LstmAttentionNet()
# self.ae_Net = self.ae_Net.to(device = self.device)
self.reg_Net = self.reg_Net.to(device = self.device)
print(f"Using device:{self.device}")
# def fit(self, all_data, window_len, devide_factor, learning_rate=0.001, batch_size=32, epoch_number=500, CONTINUE_TRAINING = False):
def fit(self, X, Y):
# self.data = all_data
# self.window_len = X.shape[1]
self.h_norm = 90
self.r_norm = 20
self.s_norm = 200
self.d_norm = 100
# data_train, data_test = self.normalize_and_devide(all_data, window_len, devide_factor)
train_dataset = Initial_Dataset(X, Y)
# self.scaler_x, self.scaler_y = train_dataset.get_scalers()
# test_dataset = Initial_Dataset(X, Y)
# import pdb; pdb.set_trace()
train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
### training component
loss_fn = torch.nn.MSELoss()
optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
self.last_error = 1e5
for e in range(self.epoch_number):
for train_tensor_x, train_tensor_y in train_loader:
optimizer_reg.zero_grad()
# train_tensor_x_distorted = self.batch_scg_distorted(train_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
train_y_pred_reg = self.reg_Net(train_tensor_x)
train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
train_loss_reg = train_loss_tensor_reg.item()
train_loss_tensor = train_loss_tensor_reg
train_loss = train_loss_reg
reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
# st()
train_loss_tensor.backward()
optimizer_reg.step()
print(f'Epoch {e} train MSE: {train_loss} ')
print(f' train REG MAE: {train_mae}')
self.error = train_mae
if self.error < self.last_error:
self.save_model(model_path='../models')
self.last_error = self.error
# st()
# if e % 5 == 0 or e == self.epoch_number-1:
# loss_test = []
# pred_list = []
# gt_list = []
# for test_tensor_x, test_tensor_y in test_loader:
# test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
# test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
# test_y_pred_reg = self.reg_Net(test_tensor_x)
# test_loss_tensor_reg = loss_fn(test_tensor_y,test_y_pred_reg)
# test_loss_tensor = test_loss_tensor_reg
# reg_pred_arr = test_y_pred_reg.cpu().detach().numpy().squeeze()
# reg_gt_arr = test_tensor_y.cpu().detach().numpy().squeeze()
# gt_list.append(reg_gt_arr)
# pred_list.append(reg_pred_arr)
# test_loss = test_loss_tensor.item()
# loss_test.append(test_loss)
# print(f'Epoch {e} test MSE: {np.mean(loss_test)} ')
# print(f' test REG MAE: {mean_absolute_error(gt_list, pred_list)*self.s_norm} ')
# self.error = np.mean(loss_test)
# if self.error < self.last_error:
# self.save_model(model_path='../models')
# self.last_error = self.error
# learning rate decay
scheduler_reg.step()
print('--------------------------------------------------------------')
# import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
def save_model(self, model_path='../models'):
print('save model...')
# with open(os.path.join(model_path,"scaler_param.pk"),"wb+") as f:
# pickle.dump([self.scaler_x,self.scaler_y,self.window_len],f)
# torch.save(self.ae_Net.state_dict(), os.path.join(model_path,"AE_model_param.pk"))
torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"FCN_model_param.pk"))
# with open(os.path.join(model_path,"error.pk"),"wb+") as f:
# pickle.dump(self.error,f)
print('save done!')
# test_error_0 = self.error
def load_model(self, model_path):
# if os.path.exists(os.path.join(model_path,"scaler_param.pk")):
# with open(os.path.join(model_path,"scaler_param.pk"),"rb+") as f:
# [self.scaler_x,self.scaler_y] = pickle.load(f)
# else:
# print(f'scaler_param.pk not exist!')
# quit()
if os.path.exists(os.path.join(model_path,"FCN_model_param.pk")):
# self.ae_Net.load_state_dict(torch.load(os.path.join(model_path,"AE_model_param.pk"),map_location=torch.device(self.device)))
self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"FCN_model_param.pk"),map_location=torch.device(self.device)))
else:
print(f'model_param.pk not exist!')
quit()
print('Model parameters loaded!')
# if os.path.exists(os.path.join(model_path,"error.pk")):
# with open(os.path.join(model_path,"error.pk"),"rb+") as f:
# self.error = pickle.load(f)
# else:
# print(f'error.pk not exist!')
# quit()
def predict(self, pred_x):
pred_result = []
for each_input in pred_x:
train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
pred_result.append(train_y_pred_reg_array)
return np.array(pred_result)
# return np.round(self.train_y_pred)[0]
def evaluate(self, X,Y):
# self.data = data
test_dataset = Initial_Dataset(X, Y)
test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
gt_list = []
pred_list = []
for test_tensor_x, test_tensor_y in test_loader:
# test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
# test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
# test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
gt_list.append(test_y_arr)
pred_list.append(test_y_pred_reg_arr)
# st()
gt_arr = np.array(gt_list)
pred_arr = np.array(pred_list)
for i in range(gt_arr.shape[1]):
mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
class CNN_Model():
"""docstring for CNN_Model."""
def __init__(self, input_shape, out_channels, kernel_size, output_features=1, layers=6, learning_rate=0.001, batch_size=32, epoch_number=500):
super(CNN_Model, self).__init__()
####
self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
self.learning_rate = learning_rate
self.batch_size = batch_size
self.epoch_number = epoch_number
# self.ae_Net = AE_Net(input_shape=input_shape)
self.reg_Net = CNN_Net(input_shape=input_shape, layers=layers, output_features=output_features, out_channels=out_channels, kernel_size=kernel_size)
# self.reg_Net = LstmAttentionNet()
# self.ae_Net = self.ae_Net.to(device = self.device)
self.reg_Net = self.reg_Net.to(device = self.device)
print(f"Using device:{self.device}")
# def fit(self, all_data, window_len, devide_factor, learning_rate=0.001, batch_size=32, epoch_number=500, CONTINUE_TRAINING = False):
def fit(self, X, Y):
train_dataset = Initial_Dataset(X, Y)
train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
### training component
loss_fn = torch.nn.MSELoss()
optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
self.last_error = 1e5
for e in range(self.epoch_number):
for train_tensor_x, train_tensor_y in train_loader:
optimizer_reg.zero_grad()
train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
train_y_pred_reg = self.reg_Net(train_tensor_x)
train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
train_loss_reg = train_loss_tensor_reg.item()
train_loss_tensor = train_loss_tensor_reg
train_loss = train_loss_reg
reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
# st()
train_loss_tensor.backward()
optimizer_reg.step()
print(f'Epoch {e} train MSE: {train_loss} ')
print(f' train REG MAE: {train_mae}')
self.error = train_mae
if self.error < self.last_error:
self.save_model(model_path='../models')
self.last_error = self.error
# learning rate decay
scheduler_reg.step()
print('--------------------------------------------------------------')
# import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
def save_model(self, model_path='../models'):
print('saving model...')
torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"CNN_model_param.pk"))
print('save done!')
def load_model(self, model_path):
if os.path.exists(os.path.join(model_path,"CNN_model_param.pk")):
self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"CNN_model_param.pk"),map_location=torch.device(self.device)))
else:
print(f'model_param.pk not exist!')
quit()
print('Model parameters loaded!')
def predict(self, pred_x):
pred_result = []
for each_input in pred_x:
train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
pred_result.append(train_y_pred_reg_array)
return np.array(pred_result)
# return np.round(self.train_y_pred)[0]
def evaluate(self, X,Y):
# self.data = data
test_dataset = Initial_Dataset(X, Y)
test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
gt_list = []
pred_list = []
for test_tensor_x, test_tensor_y in test_loader:
# test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
# test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
# test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
gt_list.append(test_y_arr)
pred_list.append(test_y_pred_reg_arr)
# st()
gt_arr = np.array(gt_list)
pred_arr = np.array(pred_list)
for i in range(gt_arr.shape[1]):
mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
class LSTM_Model():
"""docstring for LSTM_Model."""
def __init__(self, num_layers=5, hidden_size=100, output_features=4, learning_rate=0.001, batch_size=32, epoch_number=500):
super(LSTM_Model, self).__init__()
####
self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
self.learning_rate = learning_rate
self.batch_size = batch_size
self.epoch_number = epoch_number
self.reg_Net = LstmAttentionNet(num_layers=num_layers, hidden_size=hidden_size, output_features=output_features)
self.reg_Net = self.reg_Net.to(device = self.device)
print(f"Using device:{self.device}")
def fit(self, X, Y):
train_dataset = Initial_Dataset(X, Y)
train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
# test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
### training component
loss_fn = torch.nn.MSELoss()
optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
self.last_error = 1e5
for e in range(self.epoch_number):
for train_tensor_x, train_tensor_y in train_loader:
optimizer_reg.zero_grad()
train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
train_y_pred_reg = self.reg_Net(train_tensor_x)
# st()
train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
train_loss_reg = train_loss_tensor_reg.item()
train_loss_tensor = train_loss_tensor_reg
train_loss = train_loss_reg
reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
# st()
train_loss_tensor.backward()
optimizer_reg.step()
print(f'Epoch {e} train MSE: {train_loss} ')
print(f' train REG MAE: {train_mae}')
self.error = train_mae
if self.error < self.last_error:
self.save_model(model_path='../models')
self.last_error = self.error
# learning rate decay
scheduler_reg.step()
print('--------------------------------------------------------------')
# import pdb; pdb.set_trace()
# import pdb; pdb.set_trace()
def save_model(self, model_path='../models'):
print('saving model...')
torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"LSTM_model_param.pk"))
print('save done!')
def load_model(self, model_path='../models'):
if os.path.exists(os.path.join(model_path,"LSTM_model_param.pk")):
self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"LSTM_model_param.pk"),map_location=torch.device(self.device)))
else:
print(f'model_param.pk not exist!')
quit()
print('Model parameters loaded!')
def predict(self, pred_x):
pred_result = []
for each_input in pred_x:
train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
pred_result.append(train_y_pred_reg_array)
return np.array(pred_result)
def evaluate(self, X,Y):
# self.data = data
test_dataset = Initial_Dataset(X, Y)
test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
gt_list = []
pred_list = []
for test_tensor_x, test_tensor_y in test_loader:
# test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
# test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
# test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
gt_list.append(test_y_arr)
pred_list.append(test_y_pred_reg_arr)
# st()
gt_arr = np.array(gt_list)
pred_arr = np.array(pred_list)
for i in range(gt_arr.shape[1]):
mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
def main():
scaler = preprocessing.StandardScaler()
# dataset = np.load('../../data/real_data/data_label_train.1000_6.6_6.npy')
dataset = np.load('../../data/real_data/data_label_train.1000_6.npy')[:10,:]
X = dataset[:,:-6]
Y = dataset[:,-4:-2]
    # load the held-out split as well, since evaluate(X_test, Y_test) below expects it
    dataset_test = np.load('../../data/real_data/data_label_test.1000_6.6_6.npy')
    X_test = dataset_test[:,:-6]
    Y_test = dataset_test[:,-4:-2]
# X = scaler.fit_transform(X)
# X_test = scaler.transform(X_test)
# st()
# dataset_time_sort = dataset[np.argsort( (dataset[:, -5]) )]
# np.random.shuffle(dataset)
# auto_encoder = FCN_Model(input_features=6, output_features=2, layers=30, neurons=128, learning_rate=0.0001, batch_size=32, epoch_number=500)
# auto_encoder = CNN_Model(out_channels=[64,64,32], kernel_size=5, output_features=2, layers=3, learning_rate=0.001, batch_size=32, epoch_number=500)
auto_encoder = LSTM_Model(num_layers=1, hidden_size=100, output_features=2, learning_rate=0.001, batch_size=32, epoch_number=500)
auto_encoder.fit(X, Y)
auto_encoder.load_model('../models')
auto_encoder.evaluate(X_test, Y_test)
if __name__ == '__main__':
main()
| 35.316589
| 155
| 0.616817
|
b72ccc9acbc0640e410d65ff6fdcd16e2292324f
| 794
|
py
|
Python
|
rest_json_helper.py
|
vzaliva/xbee_temp_sensor
|
1b1dd275687c2aea2f22a4feb9db5f87a18ad598
|
[
"Unlicense"
] | 1
|
2016-05-24T23:56:21.000Z
|
2016-05-24T23:56:21.000Z
|
rest_json_helper.py
|
vzaliva/xbee_temp_sensor
|
1b1dd275687c2aea2f22a4feb9db5f87a18ad598
|
[
"Unlicense"
] | null | null | null |
rest_json_helper.py
|
vzaliva/xbee_temp_sensor
|
1b1dd275687c2aea2f22a4feb9db5f87a18ad598
|
[
"Unlicense"
] | null | null | null |
"""
Simple helper function to do HTTP request to give URL and parse response
as a JSON document.
The main reason for this module is to isloate code working with urllib2.
In python 2.7 there is a connection leak in urllib2 which could cause
some long-term running REST API pollers to stop working.
See https://github.com/vzaliva/xbee_temp_sensor/issues/1 for details.
"""
import urllib2
import json
import subprocess
USE_URLLIB2 = False
def json_GET(endpoint, timeout):
if USE_URLLIB2:
        f = urllib2.urlopen(endpoint, timeout=timeout)  # plain GET, no request body
try:
json_string = f.read()
finally:
f.close()
else:
        json_string = subprocess.check_output(["curl", "-s", "--connect-timeout", str(timeout), endpoint])
return json.loads(json_string)
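# Usage sketch only: the endpoint below is a placeholder and the returned structure depends
# entirely on the service being polled.
def example_poll(endpoint="http://localhost:8080/status.json", timeout=5):
    return json_GET(endpoint, timeout)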
| 26.466667
| 103
| 0.693955
|
ff5fb0449d79543cbff59932218279fccbb6eead
| 5,788
|
py
|
Python
|
implicit/nmslib_als.py
|
redbubble/implicit
|
fe85f79f8b547a75e42186bf5357ad2f395366a4
|
[
"MIT"
] | null | null | null |
implicit/nmslib_als.py
|
redbubble/implicit
|
fe85f79f8b547a75e42186bf5357ad2f395366a4
|
[
"MIT"
] | null | null | null |
implicit/nmslib_als.py
|
redbubble/implicit
|
fe85f79f8b547a75e42186bf5357ad2f395366a4
|
[
"MIT"
] | null | null | null |
import itertools
import logging
import numpy
from implicit.als import AlternatingLeastSquares
from implicit.approximate_als import augment_inner_product_matrix
log = logging.getLogger("implicit")
logging.getLogger('nmslib').setLevel(logging.WARNING)
class NMSLibALSWrapper:
"""A wrapper of the :class:`~implicit.als.AlternatingLeastSquares` that uses
`NMSLib <https://github.com/searchivarius/nmslib>`_ to create approximate nearest neighbours
indices of the latent factors.
Parameters
----------
model: AlternatingLeastSquares, required
the AlternatingLeastSquares to wrap
method : str, optional
The NMSLib method to use
index_params: dict, optional
Optional params to send to the createIndex call in NMSLib
query_params: dict, optional
Optional query time params for the NMSLib 'setQueryTimeParams' call
approximate_similar_items : bool, optional
whether or not to build an NMSLIB index for computing similar_items
approximate_recommend : bool, optional
whether or not to build an NMSLIB index for the recommend call
Attributes
----------
similar_items_index : nmslib.FloatIndex
NMSLib index for looking up similar items in the cosine space formed by the latent
item_factors
recommend_index : nmslib.FloatIndex
NMSLib index for looking up similar items in the inner product space formed by the latent
item_factors
"""
def __init__(self, model: AlternatingLeastSquares,
approximate_similar_items=True, approximate_recommend=True,
method='hnsw', index_params=None, query_params=None):
self.model = model
if index_params is None:
index_params = {'M': 16, 'post': 0, 'efConstruction': 400}
if query_params is None:
query_params = {'ef': 90}
self.similar_items_index = None
self.recommend_index = None
self.approximate_similar_items = approximate_similar_items
self.approximate_recommend = approximate_recommend
self.method = method
self.index_params = index_params
self.query_params = query_params
self.max_norm = numpy.nan
def fit(self, Ciu, show_progress=True):
self.model.fit(Ciu, show_progress)
self.initialize(show_progress)
def initialize(self, show_progress=True):
import nmslib # delay import in case the library is not installed
# create index for similar_items
if self.approximate_similar_items:
log.info("Building nmslib similar items index")
self.similar_items_index = nmslib.init(
method=self.method, space='cosinesimil')
# there are some numerical instability issues here with
# building a cosine index with vectors with 0 norms, hack around this
# by just not indexing them
norms = numpy.linalg.norm(self.model.item_factors, axis=1)
ids = numpy.arange(self.model.item_factors.shape[0])
# delete zero valued rows from the matrix
item_factors = numpy.delete(self.model.item_factors, ids[norms == 0], axis=0)
ids = ids[norms != 0]
self.similar_items_index.addDataPointBatch(item_factors, ids=ids)
self.similar_items_index.createIndex(self.index_params,
print_progress=show_progress)
self.similar_items_index.setQueryTimeParams(self.query_params)
# build up a separate index for the inner product (for recommend
# methods)
if self.approximate_recommend:
log.debug("Building nmslib recommendation index")
self.max_norm, extra = augment_inner_product_matrix(
self.model.item_factors)
self.recommend_index = nmslib.init(
method='hnsw', space='cosinesimil')
self.recommend_index.addDataPointBatch(extra)
self.recommend_index.createIndex(self.index_params, print_progress=show_progress)
self.recommend_index.setQueryTimeParams(self.query_params)
def similar_items(self, itemid, N=10):
if not self.approximate_similar_items:
return self.model.similar_items(itemid, N)
neighbours, distances = self.similar_items_index.knnQuery(
self.model.item_factors[itemid], N)
return zip(neighbours, 1.0 - distances)
def recommend(self, userid, user_items, N=10, filter_items=None, recalculate_user=False,
filter_already_liked_items=False):
if not self.approximate_recommend:
return self.model.recommend(userid, user_items, N=N,
filter_items=filter_items,
recalculate_user=recalculate_user,
filter_already_liked_items=filter_already_liked_items)
user = self.model._user_factor(userid, user_items, recalculate_user)
# calculate the top N items, removing the users own liked items from
# the results
item_filter = set(filter_items) if filter_items else set()
if filter_already_liked_items:
item_filter.update(user_items[userid].indices)
count = N + len(item_filter)
query = numpy.append(user, 0)
ids, dist = self.recommend_index.knnQuery(query, count)
# convert the distances from euclidean to cosine distance,
# and then rescale the cosine distance to go back to inner product
scaling = self.max_norm * numpy.linalg.norm(query)
dist = scaling * (1.0 - dist)
return list(itertools.islice((rec for rec in zip(ids, dist) if rec[0] not in item_filter), N))
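# Minimal usage sketch with random toy data (shapes, density and factor count are arbitrary
# assumptions, and nmslib must be installed for the index build to succeed):
def _example_wrapper_usage():
    import scipy.sparse as sparse
    als = AlternatingLeastSquares(factors=32)
    wrapper = NMSLibALSWrapper(als)
    item_users = sparse.random(500, 100, density=0.05, format="csr")  # items x users
    wrapper.fit(item_users)               # trains ALS, then builds the approximate indices
    return list(wrapper.similar_items(0, N=5))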
| 42.248175
| 102
| 0.666897
|
5b8d56cea06f1d748129342e9e267429e92496ce
| 174
|
py
|
Python
|
py2asm/instructions/interrupts.py
|
malikshahzad228/py2asm
|
de80070a0a166bc752657040af928da0f3f8be5b
|
[
"MIT"
] | null | null | null |
py2asm/instructions/interrupts.py
|
malikshahzad228/py2asm
|
de80070a0a166bc752657040af928da0f3f8be5b
|
[
"MIT"
] | 1
|
2020-09-05T17:11:09.000Z
|
2020-09-06T11:24:05.000Z
|
py2asm/instructions/interrupts.py
|
malikshahzad228/py2asm
|
de80070a0a166bc752657040af928da0f3f8be5b
|
[
"MIT"
] | 2
|
2020-09-02T08:05:20.000Z
|
2021-05-26T05:27:56.000Z
|
from py2asm.instructions.base import Instruction
class Int(Instruction):
name = 'INT'
def __init__(self, immediate_byte):
super().__init__(immediate_byte)
| 19.333333
| 48
| 0.718391
|
e7dcefbd7c6769734e9c88774af183f7b2a88d6f
| 1,357
|
py
|
Python
|
final_configs/mnist_frozen_DDTPRHL.py
|
manuel-delverme/difference_target_propagation
|
3e1630f7304a7367a5116ef3fb7ee9492e3b9065
|
[
"Apache-2.0"
] | 15
|
2020-11-04T04:41:14.000Z
|
2022-03-13T02:52:25.000Z
|
final_configs/mnist_frozen_DDTPRHL.py
|
manuel-delverme/difference_target_propagation
|
3e1630f7304a7367a5116ef3fb7ee9492e3b9065
|
[
"Apache-2.0"
] | null | null | null |
final_configs/mnist_frozen_DDTPRHL.py
|
manuel-delverme/difference_target_propagation
|
3e1630f7304a7367a5116ef3fb7ee9492e3b9065
|
[
"Apache-2.0"
] | 7
|
2021-01-11T01:33:49.000Z
|
2022-01-11T01:16:49.000Z
|
config = {
'lr': 0.0012747819097520146,
'target_stepsize': 0.02480955861721637,
'beta1': 0.9,
'beta2': 0.999,
'epsilon': 5.716850868633521e-05,
'lr_fb': 0.0006038728632117109,
'sigma': 0.03631642883132282,
'feedback_wd': 9.454160319664471e-05,
'beta1_fb': 0.99,
'beta2_fb': 0.999,
'epsilon_fb': 3.6548150986492877e-06,
'out_dir': 'logs/mnist/DKDTP2',
'network_type': 'DKDTP2',
'recurrent_input': False,
'hidden_fb_activation': 'tanh',
'fb_activation': 'tanh',
'initialization': 'xavier_normal',
'size_hidden_fb': 1024,
'dataset': 'mnist',
'optimizer': 'Adam',
'optimizer_fb': 'Adam',
'momentum': 0.0,
'parallel': True,
'normalize_lr': True,
'batch_size': 128,
'forward_wd': 0.0,
'epochs_fb': 10,
'not_randomized': True,
'not_randomized_fb': True,
'extra_fb_minibatches': 0,
'extra_fb_epochs': 0,
'epochs': 100,
'only_train_first_layer': True,
'train_only_feedback_parameters': False,
'num_hidden': 5,
'size_hidden': 256,
'size_input': 784,
'size_output': 10,
'hidden_activation': 'tanh',
'output_activation': 'softmax',
'no_bias': False,
'no_cuda': False,
'random_seed': 42,
'cuda_deterministic': False,
'freeze_BPlayers': False,
'multiple_hpsearch': False,
'save_logs': False,
'save_BP_angle': False,
'save_GN_angle': False,
'save_GN_activations_angle': False,
'save_BP_activations_angle': False,
'gn_damping': 0.0,
'hpsearch': False,
'log_interval': 30,
}
| 24.232143
| 40
| 0.728077
|
b4dca49fd5ffeecf8ae9fdd20e561ce7522111b1
| 905
|
py
|
Python
|
alipay/aop/api/domain/AlipayOpenPublicGroupDeleteModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayOpenPublicGroupDeleteModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayOpenPublicGroupDeleteModel.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenPublicGroupDeleteModel(object):
def __init__(self):
self._group_id = None
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
self._group_id = value
def to_alipay_dict(self):
params = dict()
if self.group_id:
if hasattr(self.group_id, 'to_alipay_dict'):
params['group_id'] = self.group_id.to_alipay_dict()
else:
params['group_id'] = self.group_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenPublicGroupDeleteModel()
if 'group_id' in d:
o.group_id = d['group_id']
return o
| 22.073171
| 67
| 0.59779
|
6a082b42fedc1873cbb8146b65556196fab5b240
| 5,733
|
py
|
Python
|
Collections-a-installer/community-general-2.4.0/plugins/modules/ovirt_permission_facts.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 5
|
2020-12-16T21:42:09.000Z
|
2022-03-28T16:04:32.000Z
|
Collections-a-installer/community-general-2.4.0/plugins/modules/ovirt_permission_facts.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | null | null | null |
Collections-a-installer/community-general-2.4.0/plugins/modules/ovirt_permission_facts.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: ovirt_permission_facts
short_description: Retrieve information about one or more oVirt/RHV permissions
author: "Ondra Machacek (@machacekondra)"
deprecated:
removed_in: 3.0.0 # was Ansible 2.13
why: When migrating to collection we decided to use only _info modules.
alternative: Use M(ovirt.ovirt.ovirt_permission_info) instead.
description:
- "Retrieve information about one or more oVirt/RHV permissions."
notes:
- "This module returns a variable C(ovirt_permissions), which
contains a list of permissions. You need to register the result with
the I(register) keyword to use it."
options:
user_name:
description:
- "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
group_name:
description:
- "Name of the group to manage."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment:
- community.general.ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
- name: Gather information about all permissions of user with username john
ovirt_permission_info:
user_name: john
authz_name: example.com-authz
register: result
- name: Print gathered information
ansible.builtin.debug:
msg: "{{ result.ovirt_permissions }}"
'''
RETURN = '''
ovirt_permissions:
description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
all permissions attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
returned: On success.
type: list
'''
import traceback
try:
import ovirtsdk4 as sdk
except ImportError:
pass
from ansible.module_utils.common.removed import removed_module
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._ovirt import (
check_sdk,
create_connection,
get_link_name,
ovirt_info_full_argument_spec,
search_by_name,
)
def _permissions_service(connection, module):
if module.params['user_name']:
service = connection.system_service().users_service()
entity = next(
iter(
service.list(
search='usrname={0}'.format(
'{0}@{1}'.format(module.params['user_name'], module.params['authz_name'])
)
)
),
None
)
else:
service = connection.system_service().groups_service()
entity = search_by_name(service, module.params['group_name'])
if entity is None:
raise Exception("User/Group wasn't found.")
return service.service(entity.id).permissions_service()
def main():
argument_spec = ovirt_info_full_argument_spec(
authz_name=dict(required=True, aliases=['domain']),
user_name=dict(default=None),
group_name=dict(default=None),
namespace=dict(default=None),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name in ('ovirt_permission_facts', 'community.general.ovirt_permission_facts')
if is_old_facts:
module.deprecate("The 'ovirt_permission_facts' module has been renamed to 'ovirt_permission_info', "
"and the renamed one no longer returns ansible_facts",
version='3.0.0', collection_name='community.general') # was Ansible 2.13
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
permissions_service = _permissions_service(connection, module)
permissions = []
for p in permissions_service.list():
newperm = dict()
for key, value in p.__dict__.items():
if value and isinstance(value, sdk.Struct):
newperm[key[1:]] = get_link_name(connection, value)
newperm['%s_id' % key[1:]] = value.id
permissions.append(newperm)
result = dict(ovirt_permissions=permissions)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| 34.329341
| 148
| 0.677481
|
487da42a4a23e4fc4a20cd169a179383d674d3da
| 3,056
|
py
|
Python
|
patterns/cli/commands/upload.py
|
basis-os/basis-devkit
|
0e650457a905782ffd66b226d17f3d9546b4ed3b
|
[
"BSD-3-Clause"
] | 7
|
2021-12-08T17:17:33.000Z
|
2022-03-31T04:23:43.000Z
|
patterns/cli/commands/upload.py
|
basis-os/basis-devkit
|
0e650457a905782ffd66b226d17f3d9546b4ed3b
|
[
"BSD-3-Clause"
] | 28
|
2021-10-14T18:46:36.000Z
|
2022-03-30T20:39:15.000Z
|
patterns/cli/commands/upload.py
|
basis-os/basis-devkit
|
0e650457a905782ffd66b226d17f3d9546b4ed3b
|
[
"BSD-3-Clause"
] | null | null | null |
from pathlib import Path
from typer import Option, Argument
from patterns.cli.services.deploy import deploy_graph_version
from patterns.cli.services.graph_components import create_graph_component
from patterns.cli.services.lookup import IdLookup
from patterns.cli.services.output import sprint, abort_on_error
from patterns.cli.services.upload import upload_graph_version
_graph_help = "The location of the graph.yml file for the graph to upload"
_deploy_help = "Whether or not to automatically deploy the graph after upload"
_organization_help = "The name of the Patterns organization to upload to"
_environment_help = "The name of the Patterns environment to use if deploying the graph"
_component_help = "After uploading, publish the graph version as a public component"
def upload(
deploy: bool = Option(True, "--deploy/--no-deploy", help=_deploy_help),
organization: str = Option("", "-o", "--organization", help=_organization_help),
environment: str = Option("", "-e", "--environment", help=_environment_help),
graph: Path = Argument(None, exists=True, help=_graph_help),
publish_component: bool = Option(False, help=_component_help),
):
"""Upload a new version of a graph to Patterns"""
ids = IdLookup(
environment_name=environment,
organization_name=organization,
explicit_graph_path=graph,
)
with abort_on_error("Upload failed"):
resp = upload_graph_version(
ids.graph_file_path,
ids.organization_id,
add_missing_node_ids=not publish_component,
)
graph_version_id = resp["uid"]
ui_url = resp["ui_url"]
sprint(f"\n[success]Uploaded new graph version with id [b]{graph_version_id}")
errors = resp.get("errors", [])
if publish_component:
errors = [
e
for e in errors
if not e["message"].startswith("Top level input is not connected")
and not (
e["message"].startswith("Parameter")
and e["message"].endswith("has no default or value")
)
]
if errors:
sprint(f"[error]Graph contains the following errors:")
for error in errors:
sprint(f"\t[error]{error}")
if publish_component:
with abort_on_error("Error creating component"):
resp = create_graph_component(graph_version_id)
resp_org = resp["organization"]["slug"]
resp_version = resp["version_name"]
resp_component = resp["component"]["slug"]
resp_id = resp["uid"]
sprint(
f"[success]Published graph component "
f"[b]{resp_org}/{resp_component}[/b] "
f"with version [b]{resp_version}[/b] "
f"at id [b]{resp_id}"
)
elif deploy:
with abort_on_error("Deploy failed"):
deploy_graph_version(graph_version_id, ids.environment_id)
sprint(f"[success]Graph deployed")
sprint(f"\n[info]Visit [code]{ui_url}[/code] to view your graph")
| 39.688312
| 88
| 0.654777
|
d12a6b31c004895a8997c46226ccc9b9bec476a0
| 238
|
py
|
Python
|
Model Productionization/Task 1 - Creating and Debugging ML App/project.py
|
akashloka/Innomatics-Research-Labs-Data-Science
|
0537b1ae585d665eef3598327fc66b327a471228
|
[
"MIT"
] | null | null | null |
Model Productionization/Task 1 - Creating and Debugging ML App/project.py
|
akashloka/Innomatics-Research-Labs-Data-Science
|
0537b1ae585d665eef3598327fc66b327a471228
|
[
"MIT"
] | null | null | null |
Model Productionization/Task 1 - Creating and Debugging ML App/project.py
|
akashloka/Innomatics-Research-Labs-Data-Science
|
0537b1ae585d665eef3598327fc66b327a471228
|
[
"MIT"
] | null | null | null |
import streamlit as st
import data_app as da
import ml_app as ma
def main():
# EDA
da.main()
st.header("LogisticRegression Predictor :sunglasses:")
# Predictor
ma.main()
if(__name__ == '__main__'):
main()
| 11.333333
| 58
| 0.634454
|
800a1638d527081216b90f31ae8f4d20ef35ab4e
| 739
|
py
|
Python
|
spacy/tests/regression/test_issue8190.py
|
ZeeD/spaCy
|
884d439413662e45feba2d989f383234c0340b9d
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-29T08:15:09.000Z
|
2021-06-29T08:15:09.000Z
|
spacy/tests/regression/test_issue8190.py
|
ZeeD/spaCy
|
884d439413662e45feba2d989f383234c0340b9d
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-22T13:32:07.000Z
|
2021-06-23T09:15:29.000Z
|
spacy/tests/regression/test_issue8190.py
|
ZeeD/spaCy
|
884d439413662e45feba2d989f383234c0340b9d
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-06-25T02:39:44.000Z
|
2021-06-25T02:39:44.000Z
|
import spacy
from spacy.lang.en import English
from ..util import make_tempdir
def test_issue8190():
"""Test that config overrides are not lost after load is complete."""
source_cfg = {
"nlp": {
"lang": "en",
},
"custom": {
"key": "value"
}
}
source_nlp = English.from_config(source_cfg)
with make_tempdir() as dir_path:
# We need to create a loadable source pipeline
source_path = dir_path / "test_model"
source_nlp.to_disk(source_path)
nlp = spacy.load(source_path, config={
"custom": {
"key": "updated_value"
}
})
assert nlp.config["custom"]["key"] == "updated_value"
| 25.482759
| 73
| 0.558863
|
016830cb67a7682e8b291cff70f748ba23b7682d
| 5,796
|
py
|
Python
|
processing.py
|
dssg/mlpolicylab_fall20_bills1_public
|
c0b991daf24ef8d35689bbd7ad83baf142c420a2
|
[
"MIT"
] | null | null | null |
processing.py
|
dssg/mlpolicylab_fall20_bills1_public
|
c0b991daf24ef8d35689bbd7ad83baf142c420a2
|
[
"MIT"
] | null | null | null |
processing.py
|
dssg/mlpolicylab_fall20_bills1_public
|
c0b991daf24ef8d35689bbd7ad83baf142c420a2
|
[
"MIT"
] | 1
|
2021-11-22T19:34:00.000Z
|
2021-11-22T19:34:00.000Z
|
import string
import re
import time
import pandas as pd
from tqdm import tqdm
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer # or LancasterStemmer, RegexpStemmer, SnowballStemmer
from nltk.stem import WordNetLemmatizer
from multiprocessing import Pool
from queries import read_cols_query, add_block_val_columns_query
from db_ops import run_sql_query, write_col_in_table, write_df_in_table
"""
#Test dataframe
dict = {'First Score':[100, 90, np.nan, 95],
'Second Score': [30, 45, 56, np.nan],
'Third Score':[np.nan, 40, 80, 98]}
# creating a dataframe using dictionary
df = pd.DataFrame(dict)
"""
default_stemmer = PorterStemmer()
default_lemmatizer = WordNetLemmatizer()
default_stopwords = stopwords.words('english')
# Function credits:
# https://stackoverflow.com/questions/48865150/pipeline-for-text-cleaning-processing-in-python
def clean_text(text):
def tokenize_text(text):
return [w for w in word_tokenize(text)]
def remove_special_characters(text, characters=string.punctuation+string.digits):
return text.translate(str.maketrans('', '', characters))
def stem_text(tokens, stemmer=default_stemmer):
return [stemmer.stem(t) for t in tokens]
def lemmatize_text(tokens, lemmatizer=default_lemmatizer):
return [lemmatizer.lemmatize(t) for t in tokens]
def remove_stopwords(tokens, stop_words=default_stopwords):
tokens = [w for w in tokens if w not in stop_words]
return tokens
if text is None or text[0] is None:
return None
text = text[0]
text = text.strip(' ') # strip whitespaces
text = text.lower() # lowercase
text = remove_special_characters(text) # remove punctuation and symbols
text_tokens = tokenize_text(text)
text_tokens = remove_stopwords(text_tokens) # remove stopwords
text_tokens = stem_text(text_tokens) # stemming
# text_tokens = lemmatize_text(text_tokens) # lemmatizing
text = " ".join(text_tokens)
text = text.strip(' ') # strip whitespaces again
return text
def clean_text_data(texts):
texts = texts.values.tolist()
print("Cleaning text data...")
pool = Pool(20)
start = time.time()
cleaned_texts = pool.map(clean_text, texts)
pool.close()
pool.join()
print("time: ", time.time()-start)
return pd.DataFrame(cleaned_texts)
def rename_col(col, new_name):
"""
renames a pandas column
:param col: a pd.dataframe having a single column
:param new_name: (str) new name to give to that column
:return:
A pd.dataframe having a single column (renamed)
"""
col.columns = [new_name]
return col
def retype_col(col, new_type):
"""
    retypes a pandas column
    :param col: a pd.dataframe having a single column
    :param new_type: (str or dtype) new dtype to cast that column to
    :return:
    A pd.dataframe having a single column (retyped)
"""
col = col.astype(new_type)
return col
def preprocess_data(conn, preprocess_ops, input_schema_name, input_table_name, output_schema_name, output_table_name):
"""
:param conn: a database connection object
:param preprocess_ops: dict mapping from column_names to operation_type
- column names must be names of columns present in the table
- operation type must be one of ['one_hot', 'mean_impute_col', 'add_dummy_col']
    :param input_schema_name: Name of the schema holding the input table
    :param input_table_name: Name of the table from which to read the columns
    :param output_schema_name: Name of the schema holding the output table
    :param output_table_name: Name of the table to write the processed columns to
:return:
"""
data = []
for col_name in preprocess_ops:
ops = preprocess_ops[col_name]
read_query = read_cols_query([col_name], table_name=input_table_name, schema_name=input_schema_name)
col = run_sql_query(conn, read_query, return_dataframe=True)
for op in ops:
if "rename" in op:
op, new_name = op.split("::")
col = rename_col(col, new_name)
elif "retype" in op:
op, new_type = op.split("::")
col = retype_col(col, new_type)
else:
col = globals()[op](col)
# previously write col to table
# col_type = type_mapping[str(col.iloc[0].dtype)]
# col_values = col[col_name].to_list()
# index = list(map(int, col.index.values))
# write_col_in_table(conn, index, col_values, col_type, col_name, output_schema_name, output_table_name)
data.append(col)
df = pd.concat(data, axis=1)
write_df_in_table(conn, df, output_schema_name, output_table_name)
def create_temporal_blocks(conn, schema_name, table_name, year_col="introduced_date", start_year=2009, end_year=2019,
split_list=(2,1,1,1), update_freq=2, type_val=0, verbose=False):
"""
Define the temporal blocks, and add that information to the database
:param conn: a database connection object
:param schema_name: name of schema holding the table
:param table_name: name of table having the data rows
:param year_col: The name of the column in table holding the years to create temporal blocks on
"""
query, num_blocks = add_block_val_columns_query(table_name, schema_name, year_col, start_year=start_year, end_year=end_year,
split_list=split_list, update_freq=update_freq, type_val=type_val, return_num_blocks=True)
if verbose:
print(f"Created {num_blocks} temporal splits")
run_sql_query(conn, query, return_dataframe=True)
if __name__ == "__main__":
df = pd.DataFrame({'a':[96, 97, 98, 99],
'b': [1.11, 2.22, 3.33, 4.44],
'c':['abc', 'de', 'fg', 'hijk']})
| 35.341463
| 130
| 0.673395
|
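preprocess_data() above dispatches on the strings stored in preprocess_ops: "rename::<new_name>" and "retype::<dtype>" are split on "::", and anything else is resolved through globals() as a function name such as clean_text_data. A minimal sketch of that dispatch on an in-memory DataFrame follows; the column names and values are invented for illustration and no database connection is involved.
import pandas as pd
# Minimal sketch of the preprocess_ops dispatch in preprocess_data() above.
# The column names and values are hypothetical; no database is involved.
def rename_col(col, new_name):
    col.columns = [new_name]
    return col
def retype_col(col, new_type):
    return col.astype(new_type)
preprocess_ops = {
    "bill_id": ["retype::int", "rename::bill_identifier"],
    "introduced_date": ["rename::date_introduced"],
}
df = pd.DataFrame({"bill_id": ["1", "2"], "introduced_date": ["2019-01-02", "2019-02-03"]})
data = []
for col_name, ops in preprocess_ops.items():
    col = df[[col_name]].copy()
    for op in ops:
        if "rename" in op:
            _, new_name = op.split("::")
            col = rename_col(col, new_name)
        elif "retype" in op:
            _, new_type = op.split("::")
            col = retype_col(col, new_type)
    data.append(col)
result = pd.concat(data, axis=1)
print(result.dtypes)  # bill_identifier is now an integer dtype, date_introduced unchanged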
000abd7ca8401c4f8cd1ba9e076949f644ed20e7
| 11,985
|
py
|
Python
|
PubMedNotifier.py
|
amartos/PubMedNotifier
|
b2aef9fa8a64d0d5400b08730db00653971592b2
|
[
"MIT"
] | null | null | null |
PubMedNotifier.py
|
amartos/PubMedNotifier
|
b2aef9fa8a64d0d5400b08730db00653971592b2
|
[
"MIT"
] | null | null | null |
PubMedNotifier.py
|
amartos/PubMedNotifier
|
b2aef9fa8a64d0d5400b08730db00653971592b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os, sys, argparse
import re, datetime, textwrap
import metapub
import configparser
from xdg import (XDG_CACHE_HOME, XDG_DATA_HOME, XDG_CONFIG_HOME)
class EmailSyntaxError(ValueError):
"""Error to raise if the provided e-mail is
not syntactically valid."""
pass
class EmptyDefaultError(ValueError):
"""Error to raise if a DEFAULT is empty."""
pass
class QueryInvalidError(ValueError):
"""Error to raise if a query is badly formatted."""
pass
class PubMedNotifier:
def __init__(self):
self._init_vars()
self._parse_args()
# These checks are done here as the _config_file var
# can be changed by the script's arguments
self._check_if_file_exists(self._config_file, abort=True)
self._parse_config()
self._check_if_file_exists(self._queries_file, abort=True)
self._parse_queries()
self._get_pmids_history()
if self._queries:
self._check_new_results()
if self._send_notification:
self._notify()
else:
self._error_log("No defined queries in {}".format(self._config_file))
def _init_vars(self):
self._execution_date = str(datetime.datetime.now())
self._are_errors = False # if script ran with errors, switch to True
self._config = None
self._queries_config = None
self._defaults = dict()
self._queries = dict()
self._results = dict()
self._results_txt = str()
self._counts = dict()
self._new_papers = dict()
self._history = list()
self._send_notification = bool()
self._cache_dir = str(XDG_CACHE_HOME.absolute())+"/pubmednotifier"
self._check_if_folder_exists(self._cache_dir)
self._data_dir = str(XDG_DATA_HOME.absolute())+"/pubmednotifier"
self._check_if_folder_exists(self._data_dir)
self._history_file = self._data_dir+"/history"
self._check_if_file_exists(self._history_file)
self._queries_file = self._data_dir+"/queries"
self._check_if_file_exists(self._queries_file)
self._results_dir = self._data_dir+"/results"
self._check_if_folder_exists(self._results_dir)
self._new_papers_file = self._results_dir+"/results_"+self._execution_date+".md"
self._config_dir = str(XDG_CONFIG_HOME.absolute())+"/pubmednotifier"
self._check_if_folder_exists(self._config_dir)
self._config_file = self._config_dir+"/config"
self._log_dir = self._data_dir+"/logs"
self._log_file_name = "log_"+self._execution_date
self._check_if_folder_exists(self._log_dir)
self._log_file = self._log_dir+"/"+self._log_file_name
def _check_if_folder_exists(self, path):
if not os.path.exists(path):
os.mkdir(path)
def _check_if_file_exists(self, path, abort=False):
if not os.path.exists(path):
if abort:
self._error_log("'{}' does not exists.".format(path), abort=True)
else:
open(path, "w").close()
def _parse_args(self):
        parser = argparse.ArgumentParser(
            description="""PubMedNotifier is a script that fetches query
            results from the PubMed API and notifies if new papers are
            available."""
        )
parser.add_argument(
"-c", "--config",
help="""Specify a path for the config file. Default is in
$XDG_CONFIG_HOME/pubmednotifier/config"""
)
parser.add_argument(
"-q", "--queries",
help="""Specify a path for the queries file. Default is in
$XDG_DATA_HOME/pubmednotifier/queries"""
)
parser.add_argument(
"-o", "--output",
help="""Specify a path for the results file. Default is in
$XDG_DATA_HOME/pubmednotifier/results/results_execution-date.md"""
)
parser.add_argument(
"-q", "--quiet",
help="""Disables notifications.""",
action="store_true"
)
args = parser.parse_args()
if args.config:
self._config_file = args.config
        if args.queries:
            self._queries_file = args.queries
if args.output:
self._new_papers_file = args.output
self._send_notification = not args.quiet
def _parse_config(self):
self._config = self._read_config(self._config_file)
self._parse_default_config()
def _read_config(self, filepath):
parser = configparser.ConfigParser()
parser.read_file(open(filepath))
return parser
def _parse_default_config(self):
self._defaults["e-mail"] = self._get_default_parameters("e-mail")
self._defaults["retstart"] = self._get_default_parameters("retstart")
self._defaults["retmax"] = self._get_default_parameters("retmax")
self._defaults["mindate"] = self._get_default_parameters("mindate")
self._defaults["maxdate"] = None
def _get_default_parameters(self, parameter):
"""Get the DEFAULT parameter if valid,
but raise error and abort if invalid"""
try:
value = self._config["DEFAULT"][parameter]
# check if parameter is empty
if not value:
raise EmptyDefaultError
else:
# check if e-mail syntax is valid
if parameter == "e-mail" and \
not bool(re.fullmatch(r"[^@]+@[^@]+\.[^@]+", value)):
raise EmailSyntaxError
else:
return value
        except (KeyError, EmptyDefaultError) as err:
self._error_log("DEFAULT {} is not defined.".format(parameter), abort=True)
except EmailSyntaxError as err:
self._error_log("{} is not a syntactically valid e-mail.".format(value), abort=True)
def _parse_queries(self):
self._queries_config = self._read_config(self._queries_file)
for item in self._queries_config.sections():
self._read_one_query(item)
def _read_one_query(self, title):
try:
term = self._queries_config.get(title, "query")
if not term:
raise QueryInvalidError
        except (configparser.NoOptionError, KeyError, QueryInvalidError) as err:
self._error_log("Query {} is not valid.\n".format(title))
return
self._queries[title] = {
"query":term,
"retstart":"",
"retmax":"",
"mindate":"",
"maxdate":"",
}
for item in self._queries[title].keys():
if item != "query":
try:
self._queries[title][item] = self._queries_config.get(title,item)
except configparser.NoOptionError:
self._queries[title][item] = self._defaults[item]
def _get_pmids_history(self):
with open(self._history_file,"r") as f :
self._history = [i.strip("\n") for i in f.readlines()]
def _check_new_results(self):
self._fetch_results()
self._check_pmids_history()
self._count_new_items()
self._retrieve_new_pmid_infos()
self._save_new_pmids_in_history()
self._format_results()
self._write_results()
def _fetch_results(self):
self._fetcher = metapub.PubMedFetcher(email=self._defaults["e-mail"], cachedir=self._cache_dir)
for title, values in self._queries.items():
try:
ids = self._fetcher.pmids_for_query(
query=values["query"],
since=values["mindate"],
until=values["maxdate"],
retstart=values["retstart"],
retmax=values["retmax"],
)
self._results[title] = ids
            # catch all exceptions, as an error here could be anything
            # from the NCBI server
except:
self._error_log("Error fetching query {}".format(title))
def _check_pmids_history(self):
temp_dict = dict(self._results)
for title, ids in temp_dict.items():
new_items = [i for i in ids if not i in self._history]
if new_items:
self._results[title] = new_items
else:
del self._results[title]
def _count_new_items(self):
for title in self._results.keys():
self._counts[title] = len(self._results[title])
def _retrieve_new_pmid_infos(self):
for title, ids in self._results.items():
self._new_papers[title] = dict()
for pmid in ids:
try:
article = self._fetcher.article_by_pmid(pmid)
                except metapub.InvalidPMID as err:
                    self._error_log("Error fetching pmid {}".format(pmid))
                    continue
self._new_papers[title][pmid] = (
article.title,
article.journal,
article.year,
", ".join(article.authors),
article.abstract
)
def _save_new_pmids_in_history(self):
with open(self._history_file,"a") as f :
for title, ids in self._results.items():
f.write("\n"+"\n".join(ids))
def _write_results(self):
with open(self._new_papers_file, "w") as f:
f.write(self._results_txt)
def _format_results(self):
self._results_txt = str()
if self._are_errors:
self._results_txt = "The script ran with errors. See logfile '{}'\n\n".format(self._log_file_name)
for query, ids in self._new_papers.items():
self._results_txt += "# "+query+"\n\n"
for pmid, infos in ids.items():
title, journal, year, authors, abstract = infos
if not abstract or abstract == "None":
abstract = "No abstract."
else:
abstract = "\n".join(textwrap.wrap(abstract, width=80))
self._results_txt += "## {}\n\n{}, *{}*, {}\n\n[PMID: {}]({})\n\n{}\n\n".format(
title,
authors,
journal,
year,
pmid,
"https://www.ncbi.nlm.nih.gov/pubmed/"+pmid,
abstract,
)
def _notify(self):
if os.path.exists(self._new_papers_file):
self._desktop_notification()
def _desktop_notification(self):
import notify2
message = str()
for title, count in self._counts.items():
message += title+": {} new papers\n".format(str(count))
if message:
notify2.init("PubMedNotifier")
notifier = notify2.Notification(message)
notifier.show()
return
def _error_log(self, err_msg, abort=False):
self._are_errors = True
with open(self._log_file, "a") as f:
f.write(err_msg+"\n")
print(err_msg)
if abort:
sys.exit(1)
self._check_log_size()
def _check_log_size(self):
"""In case that the log file becomes too big, create a new one with a
new timestamp (that should be close to the script execution date)"""
if os.stat(self._log_file).st_size >= 500:
self._log_file = self._data_dir+"/log_"+str(datetime.datetime.now())
with open(self._log_file, "w") as f:
f.write(self._execution_date+": log file too big, creating a new one"+"\n")
if __name__ == "__main__":
PubMedNotifier()
| 36.539634
| 110
| 0.571464
|
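PubMedNotifier reads two configparser files: the config file must define e-mail, retstart, retmax and mindate under [DEFAULT], and each section of the queries file must define a query plus optional per-query overrides of those defaults. The sketch below shows one plausible layout and the same fallback that _read_one_query applies; the e-mail address, section title and search term are made-up examples.
import configparser
# Hypothetical config and queries contents; the e-mail, section title and
# search term are invented for illustration.
config_text = """
[DEFAULT]
e-mail = someone@example.org
retstart = 0
retmax = 100
mindate = 2020/01/01
"""
queries_text = """
[ADPKD reviews]
query = polycystic kidney disease AND review[pt]
retmax = 20
"""
config = configparser.ConfigParser()
config.read_string(config_text)
queries = configparser.ConfigParser()
queries.read_string(queries_text)
for title in queries.sections():
    term = queries.get(title, "query")
    # Per-query options override the DEFAULTs, mirroring the
    # configparser.NoOptionError fallback in _read_one_query above.
    retmax = queries.get(title, "retmax") if queries.has_option(title, "retmax") else config["DEFAULT"]["retmax"]
    print(title, term, retmax)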
3a6a174366c856bf6a6696b3a0c560d8785a5719
| 9,598
|
py
|
Python
|
codecarbon/viz/data.py
|
fvaleye/codecarbon
|
9564dc53a94aeda9816316404290e3e5067336c5
|
[
"MIT"
] | null | null | null |
codecarbon/viz/data.py
|
fvaleye/codecarbon
|
9564dc53a94aeda9816316404290e3e5067336c5
|
[
"MIT"
] | null | null | null |
codecarbon/viz/data.py
|
fvaleye/codecarbon
|
9564dc53a94aeda9816316404290e3e5067336c5
|
[
"MIT"
] | null | null | null |
from typing import Dict, List, Tuple
import dash_table as dt
import pandas as pd
from codecarbon.core.emissions import Emissions
from codecarbon.input import DataSource, DataSourceException
class Data:
def __init__(self):
self._data_source = DataSource()
self._emissions = Emissions(self._data_source)
@staticmethod
def get_project_data(df: pd.DataFrame, project_name) -> dt.DataTable:
project_df = df[df.project_name == project_name]
project_df = project_df.sort_values(by="timestamp")
project_data = project_df.to_dict("rows")
columns = [{"name": column, "id": column} for column in project_df.columns]
return dt.DataTable(data=project_data, columns=columns)
@staticmethod
def get_project_summary(project_data: List[Dict]):
last_run = project_data[-1]
project_summary = {
"last_run": {
"timestamp": last_run["timestamp"],
"duration": last_run["duration"],
"emissions": round(last_run["emissions"], 1),
"energy_consumed": round((last_run["energy_consumed"]), 1),
},
"total": {
"duration": sum(
map(lambda experiment: experiment["duration"], project_data)
),
"emissions": sum(
map(lambda experiment: experiment["emissions"], project_data)
),
"energy_consumed": sum(
map(lambda experiment: experiment["energy_consumed"], project_data)
),
},
"country_name": last_run["country_name"],
"country_iso_code": last_run["country_iso_code"],
"region": last_run["region"],
"on_cloud": last_run["on_cloud"],
"cloud_provider": last_run["cloud_provider"],
"cloud_region": last_run["cloud_region"],
}
return project_summary
def get_car_miles(self, project_carbon_equivalent: float):
"""
        8.89 × 10^-3 metric tons CO2/gallon gasoline ×
        1/22.0 miles per gallon car/truck average ×
        1 CO2, CH4, and N2O/0.988 CO2
        = 4.09 × 10^-4 metric tons CO2E/mile
= 0.409 kg CO2E/mile
Source: EPA
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: number of miles driven by avg car
"""
return "{:.0f}".format(project_carbon_equivalent / 0.409)
def get_tv_time(self, project_carbon_equivalent: float):
"""
Gives the amount of time
a 32-inch LCD flat screen TV will emit
an equivalent amount of carbon
Ratio is 0.097 kg CO2 / 1 hour tv
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: equivalent TV time
"""
time_in_minutes = project_carbon_equivalent * (1 / 0.097) * 60
formated_value = "{:.0f} minutes".format(time_in_minutes)
if time_in_minutes >= 60:
time_in_hours = time_in_minutes / 60
formated_value = "{:.0f} hours".format(time_in_hours)
if time_in_hours >= 24:
time_in_days = time_in_hours / 24
formated_value = "{:.0f} days".format(time_in_days)
return formated_value
def get_household_fraction(self, project_carbon_equivalent: float):
"""
Total CO2 emissions for energy use per home: 5.734 metric tons CO2 for electricity
+ 2.06 metric tons CO2 for natural gas + 0.26 metric tons CO2 for liquid petroleum gas
+ 0.30 metric tons CO2 for fuel oil = 8.35 metric tons CO2 per home per year / 52 weeks
= 160.58 kg CO2/week on average
Source: EPA
:param project_carbon_equivalent: total project emissions in kg CO2E
:return: % of weekly emissions re: an average American household
"""
return "{:.2f}".format((project_carbon_equivalent / 160.58) * 100)
def get_global_emissions_choropleth_data(
self, net_energy_consumed: float
) -> List[Dict]:
def formatted_energy_percentage(energy_type: float, total: float) -> float:
return float("{:.1f}".format((energy_type / total) * 100))
global_energy_mix = self._data_source.get_global_energy_mix_data()
choropleth_data = []
for country_iso_code in global_energy_mix.keys():
country_name = global_energy_mix[country_iso_code]["countryName"]
if country_iso_code not in ["_define", "ATA"]:
from codecarbon.core.units import Energy
energy_consumed = Energy.from_energy(kWh=net_energy_consumed)
from codecarbon.external.geography import GeoMetadata
country_emissions = self._emissions.get_country_emissions(
energy_consumed,
GeoMetadata(
country_name=country_name, country_iso_code=country_iso_code
),
)
total = global_energy_mix[country_iso_code]["total"]
choropleth_data.append(
{
"iso_code": country_iso_code,
"emissions": country_emissions,
"country": country_name,
"fossil": formatted_energy_percentage(
global_energy_mix[country_iso_code]["fossil"], total
),
"geothermal": formatted_energy_percentage(
global_energy_mix[country_iso_code]["geothermal"], total
),
"hydroelectricity": formatted_energy_percentage(
global_energy_mix[country_iso_code]["hydroeletricity"],
total,
),
"nuclear": formatted_energy_percentage(
global_energy_mix[country_iso_code]["nuclear"], total
),
"solar": formatted_energy_percentage(
global_energy_mix[country_iso_code]["solar"], total
),
"wind": formatted_energy_percentage(
global_energy_mix[country_iso_code]["wind"], total
),
}
)
return choropleth_data
def get_regional_emissions_choropleth_data(
self, net_energy_consumed: float, country_iso_code: str
) -> List[Dict]:
# add country codes here to render for different countries
if country_iso_code.upper() not in ["USA", "CAN"]:
return [{"region_code": "", "region_name": "", "emissions": ""}]
try:
region_emissions = self._data_source.get_country_emissions_data(
country_iso_code.lower()
)
except DataSourceException: # This country has regional data at the energy mix level, not the emissions level
country_energy_mix = self._data_source.get_country_energy_mix_data(
country_iso_code.lower()
)
region_emissions = {
region: {"regionCode": region}
for region, energy_mix in country_energy_mix.items()
}
choropleth_data = []
for region_name in region_emissions.keys():
region_code = region_emissions[region_name]["regionCode"]
if region_name not in ["_unit"]:
from codecarbon.core.units import Energy
energy_consumed = Energy.from_energy(kWh=net_energy_consumed)
from codecarbon.external.geography import GeoMetadata
emissions = self._emissions.get_region_emissions(
energy_consumed,
GeoMetadata(country_iso_code=country_iso_code, region=region_name),
)
choropleth_data.append(
{
"region_code": region_code,
"region_name": region_name.upper(),
"emissions": emissions,
}
)
return choropleth_data
def get_cloud_emissions_barchart_data(
self,
net_energy_consumed: float,
on_cloud: str,
cloud_provider: str,
cloud_region: str,
) -> Tuple[str, pd.DataFrame]:
if on_cloud == "N":
return (
"",
pd.DataFrame(data={"region": [], "emissions": [], "countryName": []}),
)
cloud_emissions = self._data_source.get_cloud_emissions_data()
cloud_emissions = cloud_emissions[
["provider", "providerName", "region", "impact", "countryName"]
]
from codecarbon.core.units import EmissionsPerKWh
cloud_emissions["emissions"] = cloud_emissions.apply(
lambda row: EmissionsPerKWh.from_g_per_kWh(row.impact).kgs_per_kWh
* net_energy_consumed,
axis=1,
)
cloud_emissions_project_region = cloud_emissions[
cloud_emissions.region == cloud_region
]
cloud_emissions = cloud_emissions[
(cloud_emissions.provider == cloud_provider)
& (cloud_emissions.region != cloud_region)
].sort_values(by="emissions")
return (
cloud_emissions_project_region.iloc[0, :].providerName,
pd.concat([cloud_emissions_project_region, cloud_emissions]),
)
| 41.37069
| 118
| 0.577933
|
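The three equivalence helpers in the Data class above are plain unit conversions documented in their docstrings: 0.409 kg CO2e per mile driven, 0.097 kg CO2e per hour of a 32-inch LCD TV, and 160.58 kg CO2e per average US household-week. A standalone restatement of that arithmetic, with a made-up emissions figure:
# Standalone restatement of the equivalence arithmetic documented above;
# the 12.5 kg CO2e project total is a made-up figure.
emissions_kg = 12.5
car_miles = emissions_kg / 0.409               # 0.409 kg CO2e per mile driven (EPA)
tv_minutes = emissions_kg * (1 / 0.097) * 60   # 0.097 kg CO2e per hour of TV
household_pct = (emissions_kg / 160.58) * 100  # 160.58 kg CO2e per household-week
print("{:.0f} miles driven".format(car_miles))
print("{:.0f} minutes of 32-inch LCD TV".format(tv_minutes))
print("{:.2f}% of an average US household's weekly emissions".format(household_pct))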
81ab0ab09f1d1148603c7750e33ea35fb496e0a3
| 2,996
|
py
|
Python
|
beta/UNET.py
|
Mike-n-ike/deep-learning-for-pkd-patients
|
9812a0a54d42dee0d986c78f046ae9fb7d0027db
|
[
"MIT"
] | null | null | null |
beta/UNET.py
|
Mike-n-ike/deep-learning-for-pkd-patients
|
9812a0a54d42dee0d986c78f046ae9fb7d0027db
|
[
"MIT"
] | null | null | null |
beta/UNET.py
|
Mike-n-ike/deep-learning-for-pkd-patients
|
9812a0a54d42dee0d986c78f046ae9fb7d0027db
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torchvision.transforms.functional as TF
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(DoubleConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False), ##[(W−K+2P)/S]+1 = W, solve for P
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.conv(x)
class UNET(nn.Module): ## let's start with binary segmentation
def __init__(
self, in_channels=1, out_channels=1, features=[64, 128, 256, 512]):
super(UNET, self).__init__()
self.downs = nn.ModuleList()
self.ups = nn.ModuleList()
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
## down sampling
for feature in features:
self.downs.append(DoubleConv(in_channels, feature))
in_channels = feature
## up sampling
for feature in reversed(features):
self.ups.append(
nn.ConvTranspose2d(
feature*2, feature, kernel_size=2, stride=2
))
self.ups.append(DoubleConv(feature*2, feature))
self.bottleneck = DoubleConv(features[-1], features[-1]*2)
self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)
def forward(self,x):
skip_connections = []
for down in self.downs:
x = down(x)
skip_connections.append(x)
x = self.pool(x)
x = self.bottleneck(x)
skip_connections = skip_connections[::-1] ## reversing the list
for i in range(0, len(self.ups), 2):
x = self.ups[i](x)
skip_connection = skip_connections[i//2]
if x.shape != skip_connection.shape:
x = TF.resize(x, size=skip_connection.shape[2:])
concat_skip = torch.cat((skip_connection, x), dim=1)
x = self.ups[i+1](concat_skip)
return self.final_conv(x)
def test():
print("----------------")
print("Testing UNET with inputs divisible by 16")
x0 = torch.randn((1, 1, 160, 160))
model0 = UNET(in_channels=1, out_channels=1)
preds0 = model0(x0)
print("Input size: ", x0.shape)
print("Output size: ", preds0.shape)
if x0.shape == preds0.shape:
print("Input and output sizes agree")
print("----------------")
print("Testing UNET with inputs not divisible by 16")
x1 = torch.randn((1, 1, 161, 161))
model1 = UNET(in_channels=1, out_channels=1)
preds1 = model1(x1)
print("Input size: ", x1.shape)
print("Output size: ", preds1.shape)
if x1.shape == preds1.shape:
print("Input and output sizes agree")
| 33.288889
| 131
| 0.590788
|
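Two size arguments drive the UNET code above: padding=1 keeps the 3x3, stride-1 convolutions size-preserving (solve (W - K + 2P)/S + 1 = W for P), and four MaxPool2d(2) stages mean only inputs divisible by 16 come back to their original spatial size, which is why test() exercises 160 and 161 and why TF.resize runs before torch.cat. A small arithmetic check of both facts, separate from the model itself:
# Arithmetic behind the padding comment in DoubleConv and the 160/161 cases in test().
# Same-size convolution: (W - K + 2P) / S + 1 == W holds for K=3, S=1 when P=1.
W, K, S, P = 160, 3, 1, 1
assert (W - K + 2 * P) // S + 1 == W
def down_up(w, stages=4):
    # Four MaxPool2d(2) stages floor-halve the size, four ConvTranspose2d(2, 2) double it back.
    for _ in range(stages):
        w = w // 2
    for _ in range(stages):
        w = w * 2
    return w
assert down_up(160) == 160  # divisible by 16: decoder output matches the skip tensors
assert down_up(161) == 160  # not divisible by 16: mismatch, hence TF.resize before torch.cat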
2f3df41013cfc23e0fd223fa20a8a21ac0eea406
| 2,605
|
py
|
Python
|
delegates/lgbattery.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | 5
|
2018-07-08T20:05:52.000Z
|
2021-11-29T03:07:00.000Z
|
delegates/lgbattery.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | 2
|
2016-10-13T13:02:54.000Z
|
2021-03-05T17:08:55.000Z
|
delegates/lgbattery.py
|
stevepbyrne/dbus-systemcalc-py
|
4d50ca36af51bbe1e3040cb63f60ef262da5d397
|
[
"MIT"
] | 13
|
2015-04-13T12:21:24.000Z
|
2022-01-24T16:28:35.000Z
|
import logging
from dbus.exceptions import DBusException
from delegates.base import SystemCalcDelegate
class LgCircuitBreakerDetect(SystemCalcDelegate):
def __init__(self):
SystemCalcDelegate.__init__(self)
self._lg_voltage_buffer = None
self._lg_battery = None
def set_sources(self, dbusmonitor, settings, dbusservice):
SystemCalcDelegate.set_sources(self, dbusmonitor, settings, dbusservice)
self._dbusservice.add_path('/Dc/Battery/Alarms/CircuitBreakerTripped', value=None)
def device_added(self, service, instance, do_service_change=True):
service_type = service.split('.')[2]
if service_type == 'battery' and self._dbusmonitor.get_value(service, '/ProductId') == 0xB004:
logging.info('LG battery service appeared: %s' % service)
self._lg_battery = service
self._lg_voltage_buffer = []
self._dbusservice['/Dc/Battery/Alarms/CircuitBreakerTripped'] = 0
def device_removed(self, service, instance):
if service == self._lg_battery:
logging.info('LG battery service disappeared: %s' % service)
self._lg_battery = None
self._lg_voltage_buffer = None
self._dbusservice['/Dc/Battery/Alarms/CircuitBreakerTripped'] = None
def update_values(self, newvalues):
vebus_path = newvalues.get('/VebusService')
if self._lg_battery is None or vebus_path is None:
return
battery_current = self._dbusmonitor.get_value(self._lg_battery, '/Dc/0/Current')
if battery_current is None or abs(battery_current) > 0.01:
if len(self._lg_voltage_buffer) > 0:
logging.debug('LG voltage buffer reset')
self._lg_voltage_buffer = []
return
vebus_voltage = self._dbusmonitor.get_value(vebus_path, '/Dc/0/Voltage')
if vebus_voltage is None:
return
self._lg_voltage_buffer.append(float(vebus_voltage))
if len(self._lg_voltage_buffer) > 40:
self._lg_voltage_buffer = self._lg_voltage_buffer[-40:]
elif len(self._lg_voltage_buffer) < 20:
return
min_voltage = min(self._lg_voltage_buffer)
max_voltage = max(self._lg_voltage_buffer)
battery_voltage = self._dbusmonitor.get_value(self._lg_battery, '/Dc/0/Voltage')
logging.debug('LG battery current V=%s I=%s' % (battery_voltage, battery_current))
if min_voltage < 0.9 * battery_voltage or max_voltage > 1.1 * battery_voltage:
logging.error('LG shutdown detected V=%s I=%s %s' %
(battery_voltage, battery_current, self._lg_voltage_buffer))
self._dbusservice['/Dc/Battery/Alarms/CircuitBreakerTripped'] = 2
self._lg_voltage_buffer = []
try:
self._dbusmonitor.set_value(vebus_path, '/Mode', 4)
except DBusException:
logging.error('Cannot switch off vebus device')
| 42.704918
| 96
| 0.758925
|
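The delegate above buffers VE.Bus voltage samples while the LG battery reports roughly zero current, and flags a tripped circuit breaker once at least 20 samples exist and the buffered voltage strays more than 10% from the battery's own voltage reading. A pure-Python restatement of that rule with invented numbers, no D-Bus involved:
# Pure-Python restatement of the trip-detection rule in update_values() above;
# the voltages are invented, no D-Bus objects are involved.
def breaker_tripped(vebus_voltage_buffer, battery_voltage):
    if len(vebus_voltage_buffer) < 20:
        return False
    return (min(vebus_voltage_buffer) < 0.9 * battery_voltage
            or max(vebus_voltage_buffer) > 1.1 * battery_voltage)
steady = [51.0 + 0.1 * (i % 3) for i in range(25)]
collapsed = steady[:20] + [20.0] * 5  # VE.Bus voltage collapses while battery current is ~0
assert breaker_tripped(steady, 51.2) is False
assert breaker_tripped(collapsed, 51.2) is True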
410af19fbd3849a42747e895c6646a23530149cc
| 1,392
|
py
|
Python
|
google/appengine/_internal/django/utils/version.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/_internal/django/utils/version.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/_internal/django/utils/version.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
from google.appengine._internal import django
import os.path
import re
def get_svn_revision(path=None):
"""
Returns the SVN revision in the form SVN-XXXX,
where XXXX is the revision number.
Returns SVN-unknown if anything goes wrong, such as an unexpected
format of internal SVN files.
If path is provided, it should be a directory whose SVN info you want to
inspect. If it's not provided, this will use the root django/ package
directory.
"""
rev = None
if path is None:
path = django.__path__[0]
entries_path = '%s/.svn/entries' % path
try:
entries = open(entries_path, 'r').read()
except IOError:
pass
else:
# Versions >= 7 of the entries file are flat text. The first line is
# the version number. The next set of digits after 'dir' is the revision.
        if re.match(r'(\d+)', entries):
            rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
if rev_match:
rev = rev_match.groups()[0]
# Older XML versions of the file specify revision as an attribute of
# the first entries node.
else:
from xml.dom import minidom
dom = minidom.parse(entries_path)
rev = dom.getElementsByTagName('entry')[0].getAttribute('revision')
if rev:
return 'SVN-%s' % rev
return 'SVN-unknown'
| 31.636364
| 81
| 0.62069
|
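get_svn_revision() above handles the flat-text format of .svn/entries (format 7 and later), where the first line is the format number and the revision is the first run of digits after "dir". A tiny check of those regular expressions against a hypothetical entries string:
import re
# Hypothetical flat-text .svn/entries content (format 7+); the URL and the
# revision number 12345 are made up.
entries = "10\n\ndir\n12345\nhttp://example.org/svn/repo\n"
if re.match(r'(\d+)', entries):
    rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
    assert rev_match is not None
    assert 'SVN-%s' % rev_match.groups()[0] == 'SVN-12345'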
cae976e944676f75cdc057a32a6d7e0d68b79aac
| 962
|
py
|
Python
|
setup.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
setup.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
setup.py
|
znerol/spreadflow-delta
|
246f6d61072c41b5a8a68053650b731981259aab
|
[
"MIT"
] | null | null | null |
from setuptools import setup
tests_require = [
'coveralls',
'mock',
'testtools'
]
setup(
name='SpreadFlowDelta',
version='0.0.1',
description='Common SpreadFlow processors for delta-type messages',
author='Lorenz Schori',
author_email='lo@znerol.ch',
url='https://github.com/znerol/spreadflow-delta',
packages=[
'spreadflow_delta',
'spreadflow_delta.test'
],
install_requires=[
'SpreadFlowCore'
],
tests_require=tests_require,
extras_require={
'tests': tests_require
},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Twisted',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Multimedia'
]
)
| 24.666667
| 71
| 0.608108
|
7d3f4706f25527af22f874cc47d480c4ff3f768b
| 925
|
py
|
Python
|
nlcpy_test/712_IReadWrite_Shared.py
|
SX-Aurora/mpi4py-ve
|
aa6b1f97933196f8a485d5d808e89d5a29b58b1c
|
[
"BSD-2-Clause"
] | null | null | null |
nlcpy_test/712_IReadWrite_Shared.py
|
SX-Aurora/mpi4py-ve
|
aa6b1f97933196f8a485d5d808e89d5a29b58b1c
|
[
"BSD-2-Clause"
] | null | null | null |
nlcpy_test/712_IReadWrite_Shared.py
|
SX-Aurora/mpi4py-ve
|
aa6b1f97933196f8a485d5d808e89d5a29b58b1c
|
[
"BSD-2-Clause"
] | null | null | null |
from mpi4pyve import MPI
import numpy as np
import nlcpy as vp
from utils_io import get_fh
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
fh = get_fh()
fh.Set_size(0)
fh.Set_view(0, MPI.INT)
x = vp.array([1,2,3], dtype=int)
y = vp.empty(3, dtype=int)
print("x = ",x)
print("type(x) = ",type(x))
print("y = ",y)
print("type(y) = ",type(y))
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Iwrite_shared(x).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Iread_shared(y).Wait()
comm.Barrier()
print("Iwrite_shared-Iread_shared done")
print("x = ",x)
print("type(x) = ",type(x))
print("y = ",y)
print("type(y) = ",type(y))
if fh:
fh.Close()
comm.Barrier()
import sys
try:
y
if not isinstance(y, vp.core.core.ndarray):
print("NG : ", __file__, file=sys.stderr)
except NameError:
print("Failure test case : ", __file__, file=sys.stderr)
| 18.877551
| 60
| 0.641081
|
fcee05a5a6a9adcb8f8db425c521c7c23fed06fc
| 3,323
|
py
|
Python
|
utils/pager.py
|
mmmattleung/django_blog
|
cbb4ddf1737f7f09248d172478fcd9e2b79b7f0a
|
[
"Apache-2.0"
] | null | null | null |
utils/pager.py
|
mmmattleung/django_blog
|
cbb4ddf1737f7f09248d172478fcd9e2b79b7f0a
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:37:13.000Z
|
2022-03-12T00:13:10.000Z
|
utils/pager.py
|
mmmattleung/django_blog
|
cbb4ddf1737f7f09248d172478fcd9e2b79b7f0a
|
[
"Apache-2.0"
] | 1
|
2020-10-27T03:30:25.000Z
|
2020-10-27T03:30:25.000Z
|
import copy
from django.urls import reverse
from django.core.paginator import Paginator,Page,PageNotAnInteger,EmptyPage
def get_pager(self, request, articles, pages):
def _get_request_param(request):
page_param_dict = copy.deepcopy(request.GET)
page_param_dict._mutable = True
if request.GET.get("page"):
current_page = int(request.GET.get("page"))
else:
current_page = 1
page_param_dict["page"] = 1
return current_page, page_param_dict
current_page, page_param_dict = _get_request_param(request)
def _get_page_object(articles, pages):
p = Paginator(articles, pages)
ps = p.page(current_page)
return p, ps
p, ps = _get_page_object(articles, pages)
def _get_url(self):
return reverse(
"{2}:{0}_{1}_changelist".format(self.app_label, self.model_name, self.site_object.name_space))
base_page_url = _get_url(self)
object_list = articles[ps.start_index() - 1:ps.end_index()]
def _set_next_pre_url(ps):
if ps.has_previous():
page_param_dict["page"] = current_page - 1
previous_url = "%s?%s" % (base_page_url, page_param_dict.urlencode())
ps.previous_url = previous_url
if ps.has_next():
page_param_dict["page"] = current_page + 1
next_url = "%s?%s" % (base_page_url, page_param_dict.urlencode())
ps.next_url = next_url
_set_next_pre_url(ps)
def _count_pages(p, current_page):
print(current_page, pages, p.num_pages)
        # pages 1 - 5
if current_page - pages < 0:
begin = 1
if p.num_pages >= pages:
end = pages + 1
else:
end = p.num_pages + 1
        # pages 5 - ...
elif current_page >= pages:
if current_page + pages < p.num_pages:
begin = current_page
end = p.num_pages - (pages * 2 + 1)
# 6 + 5 > 8
elif current_page + pages >= p.num_pages:
begin = p.num_pages - pages + 1
end = p.num_pages + 1
else:
begin = current_page
end = current_page + pages + 1
# elif current_page + pages < p.num_pages:
# begin = p.num_pages - (pages * 2 + 1)
# end = p.num_pages
# elif pages * 2 <= p.num_pages:
# begin = current_page - pages
# end = current_page + pages + 1
# else:
# begin = current_page - pages
# end = pages + 1
return range(begin, end)
diy_range = _count_pages(p, current_page)
print(diy_range)
def _set_html_tags(ps, diy_range):
range_str = ""
for item in diy_range:
tmp = "<a href='{0}' class='{1}' >{2}</a>"
if item == current_page:
item_class = "btn btn-white active"
else:
item_class = "btn btn-white"
page_param_dict["page"] = item
tmp = tmp.format(
"%s?%s" % (base_page_url, page_param_dict.urlencode()),
item_class,
item
)
range_str += tmp
ps.diy_range = range_str
_set_html_tags(ps, diy_range)
return ps, object_list
| 33.565657
| 102
| 0.552212
|
ad172acaf89d6275296f7d80da6bbe43039bb66c
| 334
|
py
|
Python
|
vow.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
vow.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | null | null | null |
vow.py
|
CrownCrafter/School
|
488810b223ad746d7d1b396e609ce8f90f25662c
|
[
"MIT"
] | 1
|
2021-02-06T04:28:17.000Z
|
2021-02-06T04:28:17.000Z
|
#!/usr/bin/env python3
s = input("Enter string ")
v = 0
c = 0
u = 0
l = 0
for i in s:
if(i in 'aeiouAEIOU'):
v +=1
else:
c +=1
if(i.isupper() == True):
u += 1
else:
l += 1
print("Vowels " + str(v))
print("Consonants " + str(c))
print("Uppercase " + str(u))
print("Lowercase " + str(l))
| 16.7
| 29
| 0.488024
|
1ba9fdb709c41b5dd639de2b461565007c051214
| 1,348
|
py
|
Python
|
vpv/ui/views/ui_editor_tab.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
vpv/ui/views/ui_editor_tab.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
vpv/ui/views/ui_editor_tab.py
|
Dorky-Lever/vpv
|
0f156b2ad79cbb7060140434e34b5841ab5b1a26
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_editor_tab.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_console(object):
def setupUi(self, console):
console.setObjectName("console")
console.resize(593, 603)
self.verticalLayout = QtWidgets.QVBoxLayout(console)
self.verticalLayout.setObjectName("verticalLayout")
self.mainLayout = QtWidgets.QVBoxLayout()
self.mainLayout.setObjectName("mainLayout")
self.tableViewVolumes = QtWidgets.QTableView(console)
self.tableViewVolumes.setObjectName("tableViewVolumes")
self.mainLayout.addWidget(self.tableViewVolumes)
self.verticalLayout.addLayout(self.mainLayout)
self.pushButton = QtWidgets.QPushButton(console)
self.pushButton.setObjectName("pushButton")
self.verticalLayout.addWidget(self.pushButton)
self.retranslateUi(console)
QtCore.QMetaObject.connectSlotsByName(console)
def retranslateUi(self, console):
_translate = QtCore.QCoreApplication.translate
console.setWindowTitle(_translate("console", "Form"))
self.pushButton.setText(_translate("console", "save selected images"))
import resources_rc
| 37.444444
| 78
| 0.72181
|
35c202aa5bfc5885117bd57c2e9015eed05952c6
| 817
|
py
|
Python
|
lecture_03/16_inverse_kinematics_rhino.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 48
|
2021-11-27T05:28:31.000Z
|
2022-02-06T16:08:30.000Z
|
lecture_03/16_inverse_kinematics_rhino.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 15
|
2021-03-03T10:50:59.000Z
|
2021-06-21T07:47:47.000Z
|
lecture_03/16_inverse_kinematics_rhino.py
|
g-jami/COMPAS-II-FS2021
|
3282036db5f7caa2d904370d47878e578092ae24
|
[
"MIT"
] | 25
|
2021-03-02T15:08:11.000Z
|
2022-03-29T14:34:20.000Z
|
from compas.geometry import Frame
from compas.robots import LocalPackageMeshLoader
from compas.robots import RobotModel
from compas_rhino.artists import RobotModelArtist
from ur_kinematics import inverse_kinematics_ur5
loader = LocalPackageMeshLoader('models', 'ur_description')
model = RobotModel.from_urdf_file(loader.load_urdf('ur5.urdf'))
model.load_geometry(loader)
f = Frame((0.417, 0.191, -0.005), (-0.000, 1.000, 0.000), (1.000, 0.000, 0.000))
f.point /= 0.001
sols = inverse_kinematics_ur5(f)
artist = RobotModelArtist(model, layer='COMPAS::Robot Viz')
artist.clear_layer()
for joint_values in sols:
# Create joint state dictionary
joint_names = model.get_configurable_joint_names()
joint_state = dict(zip(joint_names, joint_values))
artist.update(joint_state)
artist.draw_visual()
| 31.423077
| 80
| 0.773562
|
ccc0a987ed824e16ae1ded79fe754d6cf75a735f
| 5,389
|
py
|
Python
|
src/utils/setup.py
|
danielecalda/ReviewExplanationExtraction
|
91bb212f91cec6283e668eaba4196104e27e8ba1
|
[
"Apache-2.0"
] | null | null | null |
src/utils/setup.py
|
danielecalda/ReviewExplanationExtraction
|
91bb212f91cec6283e668eaba4196104e27e8ba1
|
[
"Apache-2.0"
] | null | null | null |
src/utils/setup.py
|
danielecalda/ReviewExplanationExtraction
|
91bb212f91cec6283e668eaba4196104e27e8ba1
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import json
import collections
import spacy
from metal.contrib.info_extraction.mentions import RelationMention
import numpy as np
import progressbar
DATA_FILE1 = 'data/train_examples.pkl'
DATA_FILE2 = 'data/dev_examples.pkl'
DATA_FILE3 = 'data/test_examples.pkl'
DATA_FILE4 = 'data/train_labels.pkl'
DATA_FILE5 = 'data/dev_labels.pkl'
DATA_FILE6 = 'data/test_labels.pkl'
DATA_FILE7 = 'data/data.pkl'
DATA_FILE8 = 'data/labels.pkl'
def setup(train_size, dev_size, test_size):
train_list = []
dev_list = []
test_list = []
print("Reading from csv and splitting")
for i, line in enumerate(open('../data/reviews200k.json', 'r')):
if i < 100000 and len(line) > 300:
train_list.append(json.loads(line))
if 99999 < i < 100500 and len(line) > 300:
dev_list.append(json.loads(line))
if 149999 < i < 200000 and len(line) > 300:
test_list.append(json.loads(line))
print(len(train_list))
train_reviews = []
for i in range(0, 6):
j = 0
for line in train_list:
if i == int(line['stars']):
train_reviews.append(line)
j = j + 1
if j > train_size:
break
train_examples = [review['text'].lower() for review in train_reviews]
train_labels = [int(review['stars']) for review in train_reviews]
print(collections.Counter(train_labels))
with open(DATA_FILE1, 'wb') as f:
pickle.dump(train_examples, f)
with open(DATA_FILE4, 'wb') as f:
pickle.dump(train_labels, f)
print(len(dev_list))
dev_reviews = []
for i in range(0, 6):
j = 0
for line in dev_list:
if i == int(line['stars']):
dev_reviews.append(line)
j = j + 1
if j > dev_size:
break
dev_examples = [review['text'].lower() for review in dev_reviews]
dev_labels = [int(review['stars']) for review in dev_reviews]
with open(DATA_FILE2, 'wb') as f:
pickle.dump(dev_examples, f)
with open(DATA_FILE5, 'wb') as f:
pickle.dump(dev_labels, f)
print(len(test_list))
test_reviews = []
for i in range(0, 6):
j = 0
for line in test_list:
if i == int(line['stars']):
test_reviews.append(line)
j = j + 1
if j > test_size:
break
test_examples = [review['text'].lower() for review in test_reviews]
test_labels = [int(review['stars']) for review in test_reviews]
print(collections.Counter(test_labels))
with open(DATA_FILE3, 'wb') as f:
pickle.dump(test_examples, f)
with open(DATA_FILE6, 'wb') as f:
pickle.dump(test_labels, f)
print("Done")
print("Creating objects")
train_results = []
spacy_nlp = spacy.load('en_core_web_sm')
for example in progressbar.progressbar(train_examples):
doc = spacy_nlp(example)
words, char_offsets, pos_tags, ner_tags, entity_types = ([] for i in range(5))
for sent in doc.sents:
for i, token in enumerate(sent):
words.append(str(token))
pos_tags.append(token.tag_)
ner_tags.append(token.ent_type_ if token.ent_type_ else 'O')
char_offsets.append(token.idx)
entity_types.append('O')
result = RelationMention(1, example, [(0, 2), (4, 5)], words, char_offsets, pos_tags=pos_tags,
ner_tags=ner_tags, entity_types=entity_types)
train_results.append(result)
dev_results = []
for example in progressbar.progressbar(dev_examples):
doc = spacy_nlp(example)
words, char_offsets, pos_tags, ner_tags, entity_types = ([] for i in range(5))
for sent in doc.sents:
for i, token in enumerate(sent):
words.append(str(token))
pos_tags.append(token.tag_)
ner_tags.append(token.ent_type_ if token.ent_type_ else 'O')
char_offsets.append(token.idx)
entity_types.append('O')
result = RelationMention(1, example, [(0, 2), (4, 5)], words, char_offsets, pos_tags=pos_tags,
ner_tags=ner_tags, entity_types=entity_types)
dev_results.append(result)
test_results = []
for example in progressbar.progressbar(test_examples):
doc = spacy_nlp(example)
words, char_offsets, pos_tags, ner_tags, entity_types = ([] for i in range(5))
for sent in doc.sents:
for i, token in enumerate(sent):
words.append(str(token))
pos_tags.append(token.tag_)
ner_tags.append(token.ent_type_ if token.ent_type_ else 'O')
char_offsets.append(token.idx)
entity_types.append('O')
result = RelationMention(1, example, [(0, 2), (4, 5)], words, char_offsets, pos_tags=pos_tags,
ner_tags=ner_tags, entity_types=entity_types)
test_results.append(result)
Cs = [train_results, dev_results, test_results]
Ys = [np.array(train_labels), np.array(dev_labels), np.array(test_labels)]
with open(DATA_FILE7, 'wb') as f:
pickle.dump(Cs, f)
with open(DATA_FILE8, 'wb') as f:
pickle.dump(Ys, f)
print("Done")
| 31.7
| 102
| 0.597142
|
dfedabde77a5cd04811f4dc4e63ab0a2b6e5b327
| 625
|
py
|
Python
|
lumen/showcase/migrations/0006_auto_20210728_0454.py
|
kowabunga314/lumen
|
df2f87ca3c7fda19eafe99e8e59b3376c25cfd80
|
[
"MIT"
] | null | null | null |
lumen/showcase/migrations/0006_auto_20210728_0454.py
|
kowabunga314/lumen
|
df2f87ca3c7fda19eafe99e8e59b3376c25cfd80
|
[
"MIT"
] | null | null | null |
lumen/showcase/migrations/0006_auto_20210728_0454.py
|
kowabunga314/lumen
|
df2f87ca3c7fda19eafe99e8e59b3376c25cfd80
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-28 04:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('showcase', '0005_auto_20210728_0438'),
]
operations = [
migrations.AlterField(
model_name='photo',
name='photo',
field=models.ImageField(default='images/placeholder.jpeg', upload_to='images/'),
),
migrations.AlterField(
model_name='series',
name='thumbnail',
field=models.ImageField(default='images/placeholder.jpeg', upload_to='images/'),
),
]
| 26.041667
| 92
| 0.6
|
571ff120fb77b6904ef297ac3255082a53b00677
| 881
|
py
|
Python
|
modules/tests/morgan.py
|
ansteh/multivariate
|
fbd166f9e9a6d721a1d876b6e46db064f43afe53
|
[
"Apache-2.0"
] | null | null | null |
modules/tests/morgan.py
|
ansteh/multivariate
|
fbd166f9e9a6d721a1d876b6e46db064f43afe53
|
[
"Apache-2.0"
] | null | null | null |
modules/tests/morgan.py
|
ansteh/multivariate
|
fbd166f9e9a6d721a1d876b6e46db064f43afe53
|
[
"Apache-2.0"
] | null | null | null |
import pandas as ps
import numpy as np
import os, sys
sys.path.append('../../modules/')
from analysis.covariance import cov
from analysis.symmetric import isSymmetric
from analysis.definite import isPositiveDefinite
from analysis.correlation import corr
from algorithms.morgan import morgan
data = ps.read_csv(os.path.join(os.path.dirname(__file__), "../resources/apple-tree.csv"), sep = ',')
matrix = data.to_numpy()
matrix = matrix.T
matrix = np.array(matrix, dtype=np.float64)
def testbed():
    C = cov(matrix)
    print(np.all(np.diagonal(C) > 0))
    threshold = 1e-6
    print('symmetric:', isSymmetric(C, threshold))
    print('positive definite:', isPositiveDefinite(C))
    A = morgan(C)
    #print(A)
    print(C)
    print(np.dot(A, A.T))
    #print(np.subtract(C, np.dot(A, A.T)))
    #print(C == np.dot(A, A.T))
    #print(np.all(C - np.dot(A, A.T) < 1e-6))
    return A
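# Example run (a sketch, assuming morgan() returns a lower-triangular factor A of
# the covariance matrix so that C is approximately A @ A.T):
#
#     if __name__ == '__main__':
#         A = testbed()
#         # reconstruction error should be tiny for a positive definite C
#         print(np.max(np.abs(cov(matrix) - np.dot(A, A.T))))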
| 27.53125
| 101
| 0.684449
|
2b137307b39843adc622ed35bdda087587ba8623
| 24,354
|
bzl
|
Python
|
tensorflow/core/platform/default/build_config.bzl
|
mouse36872/tensorflow
|
64228599fdeeec0bf504485901a8c8e558a5a9ad
|
[
"Apache-2.0"
] | 2
|
2017-09-20T22:52:37.000Z
|
2018-09-26T18:43:27.000Z
|
tensorflow/core/platform/default/build_config.bzl
|
mouse36872/tensorflow
|
64228599fdeeec0bf504485901a8c8e558a5a9ad
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/core/platform/default/build_config.bzl
|
mouse36872/tensorflow
|
64228599fdeeec0bf504485901a8c8e558a5a9ad
|
[
"Apache-2.0"
] | 3
|
2017-09-20T22:52:39.000Z
|
2018-10-14T11:10:21.000Z
|
# Platform-specific build configurations.
load("@com_google_protobuf//:protobuf.bzl", "proto_gen")
load("//tensorflow:tensorflow.bzl", "clean_dep", "if_not_windows")
load("//tensorflow/core/platform:default/build_config_root.bzl", "if_static")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
load("@local_config_rocm//rocm:build_defs.bzl", "if_rocm")
load(
"//third_party/mkl:build_defs.bzl",
"if_mkl_ml",
)
# Appends a suffix to a list of deps.
def tf_deps(deps, suffix):
tf_deps = []
# If the package name is in shorthand form (ie: does not contain a ':'),
# expand it to the full name.
for dep in deps:
tf_dep = dep
if not ":" in dep:
dep_pieces = dep.split("/")
tf_dep += ":" + dep_pieces[len(dep_pieces) - 1]
tf_deps += [tf_dep + suffix]
return tf_deps
# Modified from @cython//:Tools/rules.bzl
def pyx_library(
name,
deps = [],
py_deps = [],
srcs = [],
testonly = None,
srcs_version = "PY2AND3",
**kwargs):
"""Compiles a group of .pyx / .pxd / .py files.
First runs Cython to create .cpp files for each input .pyx or .py + .pxd
pair. Then builds a shared object for each, passing "deps" to each cc_binary
rule (includes Python headers by default). Finally, creates a py_library rule
with the shared objects and any pure Python "srcs", with py_deps as its
dependencies; the shared objects can be imported like normal Python files.
Args:
name: Name for the rule.
deps: C/C++ dependencies of the Cython (e.g. Numpy headers).
py_deps: Pure Python dependencies of the final library.
srcs: .py, .pyx, or .pxd files to either compile or pass through.
**kwargs: Extra keyword arguments passed to the py_library.
"""
# First filter out files that should be run compiled vs. passed through.
py_srcs = []
pyx_srcs = []
pxd_srcs = []
for src in srcs:
if src.endswith(".pyx") or (src.endswith(".py") and
src[:-3] + ".pxd" in srcs):
pyx_srcs.append(src)
elif src.endswith(".py"):
py_srcs.append(src)
else:
pxd_srcs.append(src)
if src.endswith("__init__.py"):
pxd_srcs.append(src)
# Invoke cython to produce the shared object libraries.
for filename in pyx_srcs:
native.genrule(
name = filename + "_cython_translation",
srcs = [filename],
outs = [filename.split(".")[0] + ".cpp"],
# Optionally use PYTHON_BIN_PATH on Linux platforms so that python 3
# works. Windows has issues with cython_binary so skip PYTHON_BIN_PATH.
cmd = "PYTHONHASHSEED=0 $(location @cython//:cython_binary) --cplus $(SRCS) --output-file $(OUTS)",
testonly = testonly,
tools = ["@cython//:cython_binary"] + pxd_srcs,
)
shared_objects = []
for src in pyx_srcs:
stem = src.split(".")[0]
shared_object_name = stem + ".so"
native.cc_binary(
name = shared_object_name,
srcs = [stem + ".cpp"],
deps = deps + ["@org_tensorflow//third_party/python_runtime:headers"],
linkshared = 1,
testonly = testonly,
)
shared_objects.append(shared_object_name)
# Now create a py_library with these shared objects as data.
native.py_library(
name = name,
srcs = py_srcs,
deps = py_deps,
srcs_version = srcs_version,
data = shared_objects,
testonly = testonly,
**kwargs
)
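# A minimal usage sketch for pyx_library (hypothetical target and file names; the
# real deps depend on what headers the .pyx code includes):
#
#     pyx_library(
#         name = "fast_ops",
#         srcs = ["fast_ops.pyx", "helpers.py"],
#         deps = ["//third_party/py/numpy:headers"],
#     )
#
# Each .pyx source is translated to C++, compiled into a .so, and exposed through
# the resulting py_library next to the plain Python sources.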
def _proto_cc_hdrs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
return ret
def _proto_cc_srcs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
return ret
def _proto_py_outs(srcs, use_grpc_plugin = False):
ret = [s[:-len(".proto")] + "_pb2.py" for s in srcs]
if use_grpc_plugin:
ret += [s[:-len(".proto")] + "_pb2_grpc.py" for s in srcs]
return ret
# Re-defined protocol buffer rule to allow building "header only" protocol
# buffers, to avoid duplicate registrations. Also allows non-iterable cc_libs
# containing select() statements.
def cc_proto_library(
name,
srcs = [],
deps = [],
cc_libs = [],
include = None,
protoc = "@com_google_protobuf//:protoc",
internal_bootstrap_hack = False,
use_grpc_plugin = False,
use_grpc_namespace = False,
make_default_target_header_only = False,
protolib_name = None,
protolib_deps = [],
**kargs):
"""Bazel rule to create a C++ protobuf library from proto source files.
Args:
name: the name of the cc_proto_library.
srcs: the .proto files of the cc_proto_library.
deps: a list of dependency labels; must be cc_proto_library.
cc_libs: a list of other cc_library targets depended by the generated
cc_library.
include: a string indicating the include path of the .proto files.
protoc: the label of the protocol compiler to generate the sources.
      internal_bootstrap_hack: a flag to indicate the cc_proto_library is used only
        for bootstrapping. When it is set to True, no files will be generated.
The rule will simply be a provider for .proto files, so that other
cc_proto_library can depend on it.
use_grpc_plugin: a flag to indicate whether to call the grpc C++ plugin
when processing the proto files.
use_grpc_namespace: the namespace for the grpc services.
make_default_target_header_only: Controls the naming of generated
rules. If True, the `name` rule will be header-only, and an _impl rule
will contain the implementation. Otherwise the header-only rule (name
+ "_headers_only") must be referred to explicitly.
protolib_name: the name for the proto library generated by this rule.
protolib_deps: The dependencies to proto libraries.
**kargs: other keyword arguments that are passed to cc_library.
"""
includes = []
if include != None:
includes = [include]
if protolib_name == None:
protolib_name = name
if not protolib_deps:
protolib_deps = deps
if internal_bootstrap_hack:
# For pre-checked-in generated files, we add the internal_bootstrap_hack
# which will skip the codegen action.
proto_gen(
name = protolib_name + "_genproto",
srcs = srcs,
includes = includes,
protoc = protoc,
visibility = ["//visibility:public"],
deps = [s + "_genproto" for s in protolib_deps],
)
# An empty cc_library to make rule dependency consistent.
native.cc_library(
name = name,
**kargs
)
return
grpc_cpp_plugin = None
plugin_options = []
if use_grpc_plugin:
grpc_cpp_plugin = "//external:grpc_cpp_plugin"
if use_grpc_namespace:
plugin_options = ["services_namespace=grpc"]
gen_srcs = _proto_cc_srcs(srcs, use_grpc_plugin)
gen_hdrs = _proto_cc_hdrs(srcs, use_grpc_plugin)
outs = gen_srcs + gen_hdrs
proto_gen(
name = protolib_name + "_genproto",
srcs = srcs,
outs = outs,
gen_cc = 1,
includes = includes,
plugin = grpc_cpp_plugin,
plugin_language = "grpc",
plugin_options = plugin_options,
protoc = protoc,
visibility = ["//visibility:public"],
deps = [s + "_genproto" for s in protolib_deps],
)
if use_grpc_plugin:
cc_libs += select({
clean_dep("//tensorflow:linux_s390x"): ["//external:grpc_lib_unsecure"],
"//conditions:default": ["//external:grpc_lib"],
})
if make_default_target_header_only:
header_only_name = name
impl_name = name + "_impl"
else:
header_only_name = name + "_headers_only"
impl_name = name
native.cc_library(
name = impl_name,
srcs = gen_srcs,
hdrs = gen_hdrs,
deps = cc_libs + deps,
includes = includes,
alwayslink = 1,
**kargs
)
native.cc_library(
name = header_only_name,
deps = ["@com_google_protobuf//:protobuf_headers"] + if_static([impl_name]),
hdrs = gen_hdrs,
**kargs
)
# Temporarily also add an alias with the 'protolib_name'. So far we relied
# on copybara to switch dependencies to the _cc dependencies. Now that these
# copybara rules are removed, we need to first change the internal BUILD
# files to depend on the correct targets instead, then this can be removed.
# TODO(b/143648532): Remove this once all reverse dependencies are migrated.
if protolib_name != name:
native.alias(
name = protolib_name,
actual = name,
visibility = kargs["visibility"],
)
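# A usage sketch for cc_proto_library (hypothetical proto names; within this repo
# callers normally go through tf_proto_library / tf_proto_library_cc below rather
# than invoking this rule directly):
#
#     cc_proto_library(
#         name = "example_proto_cc",
#         srcs = ["example.proto"],
#         deps = [":base_proto_cc"],
#         make_default_target_header_only = True,
#     )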
# Re-defined protocol buffer rule to bring in the change introduced in commit
# https://github.com/google/protobuf/commit/294b5758c373cbab4b72f35f4cb62dc1d8332b68
# which was not part of a stable protobuf release in 04/2018.
# TODO(jsimsa): Remove this once the protobuf dependency version is updated
# to include the above commit.
def py_proto_library(
name,
srcs = [],
deps = [],
py_libs = [],
py_extra_srcs = [],
include = None,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
use_grpc_plugin = False,
**kargs):
"""Bazel rule to create a Python protobuf library from proto source files
NOTE: the rule is only an internal workaround to generate protos. The
interface may change and the rule may be removed when bazel has introduced
the native rule.
Args:
name: the name of the py_proto_library.
srcs: the .proto files of the py_proto_library.
deps: a list of dependency labels; must be py_proto_library.
py_libs: a list of other py_library targets depended by the generated
py_library.
py_extra_srcs: extra source files that will be added to the output
py_library. This attribute is used for internal bootstrapping.
include: a string indicating the include path of the .proto files.
default_runtime: the implicitly default runtime which will be depended on by
the generated py_library target.
protoc: the label of the protocol compiler to generate the sources.
use_grpc_plugin: a flag to indicate whether to call the Python C++ plugin
when processing the proto files.
**kargs: other keyword arguments that are passed to py_library.
"""
outs = _proto_py_outs(srcs, use_grpc_plugin)
includes = []
if include != None:
includes = [include]
grpc_python_plugin = None
if use_grpc_plugin:
grpc_python_plugin = "//external:grpc_python_plugin"
# Note: Generated grpc code depends on Python grpc module. This dependency
# is not explicitly listed in py_libs. Instead, host system is assumed to
# have grpc installed.
proto_gen(
name = name + "_genproto",
srcs = srcs,
outs = outs,
gen_py = 1,
includes = includes,
plugin = grpc_python_plugin,
plugin_language = "grpc",
protoc = protoc,
visibility = ["//visibility:public"],
deps = [s + "_genproto" for s in deps],
)
if default_runtime and not default_runtime in py_libs + deps:
py_libs = py_libs + [default_runtime]
native.py_library(
name = name,
srcs = outs + py_extra_srcs,
deps = py_libs + deps,
imports = includes,
**kargs
)
def tf_proto_library_cc(
name,
srcs = [],
has_services = None,
protodeps = [],
visibility = None,
testonly = 0,
cc_libs = [],
cc_stubby_versions = None,
cc_grpc_version = None,
use_grpc_namespace = False,
j2objc_api_version = 1,
cc_api_version = 2,
js_codegen = "jspb",
make_default_target_header_only = False):
js_codegen = js_codegen # unused argument
native.filegroup(
name = name + "_proto_srcs",
srcs = srcs + tf_deps(protodeps, "_proto_srcs"),
testonly = testonly,
visibility = visibility,
)
use_grpc_plugin = None
if cc_grpc_version:
use_grpc_plugin = True
protolib_deps = tf_deps(protodeps, "")
cc_deps = tf_deps(protodeps, "_cc")
cc_name = name + "_cc"
if not srcs:
# This is a collection of sub-libraries. Build header-only and impl
# libraries containing all the sources.
proto_gen(
name = name + "_genproto",
protoc = "@com_google_protobuf//:protoc",
visibility = ["//visibility:public"],
deps = [s + "_genproto" for s in protolib_deps],
)
# Temporarily also add an alias with 'name'. So far we relied on
# copybara to switch dependencies to the _cc dependencies. Now that these
# copybara rules are removed, we need to change the internal BUILD files to
# depend on the correct targets instead.
# TODO(b/143648532): Remove this once all reverse dependencies are
# migrated.
native.alias(
name = name,
actual = cc_name,
testonly = testonly,
visibility = visibility,
)
native.cc_library(
name = cc_name,
deps = cc_deps + ["@com_google_protobuf//:protobuf_headers"] + if_static([name + "_cc_impl"]),
testonly = testonly,
visibility = visibility,
)
native.cc_library(
name = cc_name + "_impl",
deps = [s + "_impl" for s in cc_deps] + ["@com_google_protobuf//:cc_wkt_protos"],
)
return
cc_proto_library(
name = cc_name,
protolib_name = name,
testonly = testonly,
srcs = srcs,
cc_libs = cc_libs + if_static(
["@com_google_protobuf//:protobuf"],
["@com_google_protobuf//:protobuf_headers"],
),
copts = if_not_windows([
"-Wno-unknown-warning-option",
"-Wno-unused-but-set-variable",
"-Wno-sign-compare",
]),
make_default_target_header_only = make_default_target_header_only,
protoc = "@com_google_protobuf//:protoc",
use_grpc_plugin = use_grpc_plugin,
use_grpc_namespace = use_grpc_namespace,
visibility = visibility,
deps = cc_deps + ["@com_google_protobuf//:cc_wkt_protos"],
protolib_deps = protolib_deps + ["@com_google_protobuf//:cc_wkt_protos"],
)
def tf_proto_library_py(
name,
srcs = [],
protodeps = [],
deps = [],
visibility = None,
testonly = 0,
srcs_version = "PY2AND3",
use_grpc_plugin = False):
py_deps = tf_deps(protodeps, "_py")
py_name = name + "_py"
if not srcs:
# This is a collection of sub-libraries. Build header-only and impl
# libraries containing all the sources.
proto_gen(
name = py_name + "_genproto",
protoc = "@com_google_protobuf//:protoc",
visibility = ["//visibility:public"],
deps = [s + "_genproto" for s in py_deps],
)
native.py_library(
name = py_name,
deps = py_deps + [clean_dep("@com_google_protobuf//:protobuf_python")],
testonly = testonly,
visibility = visibility,
)
return
py_proto_library(
name = py_name,
testonly = testonly,
srcs = srcs,
default_runtime = clean_dep("@com_google_protobuf//:protobuf_python"),
protoc = "@com_google_protobuf//:protoc",
srcs_version = srcs_version,
use_grpc_plugin = use_grpc_plugin,
visibility = visibility,
deps = deps + py_deps + [clean_dep("@com_google_protobuf//:protobuf_python")],
)
def tf_jspb_proto_library(**kwargs):
pass
def tf_nano_proto_library(**kwargs):
pass
def tf_proto_library(
name,
srcs = [],
has_services = None,
protodeps = [],
visibility = None,
testonly = 0,
cc_libs = [],
cc_api_version = 2,
cc_grpc_version = None,
j2objc_api_version = 1,
js_codegen = "jspb",
make_default_target_header_only = False,
exports = []):
"""Make a proto library, possibly depending on other proto libraries."""
_ignore = (js_codegen, exports)
tf_proto_library_cc(
name = name,
testonly = testonly,
srcs = srcs,
cc_grpc_version = cc_grpc_version,
cc_libs = cc_libs,
make_default_target_header_only = make_default_target_header_only,
protodeps = protodeps,
visibility = visibility,
)
tf_proto_library_py(
name = name,
testonly = testonly,
srcs = srcs,
protodeps = protodeps,
srcs_version = "PY2AND3",
use_grpc_plugin = has_services,
visibility = visibility,
)
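# A usage sketch for tf_proto_library (hypothetical package and proto names):
#
#     tf_proto_library(
#         name = "metrics_proto",
#         srcs = ["metrics.proto"],
#         protodeps = ["//tensorflow/core:protos_all"],
#         cc_api_version = 2,
#     )
#
# which expands into the "_cc" and "_py" targets via tf_proto_library_cc and
# tf_proto_library_py above.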
# A list of all files under platform matching the pattern in 'files'. In
# contrast with 'tf_platform_srcs' below, which selectively collects files that
# must be compiled in the 'default' platform, this is a list of all headers
# mentioned in the platform/* files.
def tf_platform_hdrs(files):
return native.glob(["*/" + f for f in files])
def tf_platform_srcs(files):
base_set = ["default/" + f for f in files]
windows_set = base_set + ["windows/" + f for f in files]
posix_set = base_set + ["posix/" + f for f in files]
return select({
clean_dep("//tensorflow:windows"): native.glob(windows_set),
"//conditions:default": native.glob(posix_set),
})
def tf_additional_lib_hdrs(exclude = []):
windows_hdrs = native.glob([
"default/*.h",
"windows/*.h",
"posix/error.h",
], exclude = exclude + [
"default/subprocess.h",
"default/posix_file_system.h",
])
return select({
clean_dep("//tensorflow:windows"): windows_hdrs,
"//conditions:default": native.glob([
"default/*.h",
"posix/*.h",
], exclude = exclude),
})
def tf_additional_lib_srcs(exclude = []):
windows_srcs = native.glob([
"default/*.cc",
"windows/*.cc",
"posix/error.cc",
], exclude = exclude + [
"default/env.cc",
"default/env_time.cc",
"default/load_library.cc",
"default/net.cc",
"default/port.cc",
"default/posix_file_system.cc",
"default/subprocess.cc",
"default/stacktrace_handler.cc",
])
return select({
clean_dep("//tensorflow:windows"): windows_srcs,
"//conditions:default": native.glob([
"default/*.cc",
"posix/*.cc",
], exclude = exclude),
})
def tf_additional_monitoring_hdrs():
return []
def tf_additional_monitoring_srcs():
return [
"default/monitoring.cc",
]
def tf_additional_proto_hdrs():
return [
"default/integral_types.h",
"default/logging.h",
]
def tf_additional_all_protos():
return [clean_dep("//tensorflow/core:protos_all")]
def tf_protos_all_impl():
return [
clean_dep("//tensorflow/core:autotuning_proto_cc_impl"),
clean_dep("//tensorflow/core:conv_autotuning_proto_cc_impl"),
clean_dep("//tensorflow/core:protos_all_cc_impl"),
]
def tf_protos_all():
return if_static(
extra_deps = tf_protos_all_impl(),
otherwise = [clean_dep("//tensorflow/core:protos_all_cc")],
)
def tf_protos_grappler_impl():
return [clean_dep("//tensorflow/core/grappler/costs:op_performance_data_cc_impl")]
def tf_protos_grappler():
return if_static(
extra_deps = tf_protos_grappler_impl(),
otherwise = [clean_dep("//tensorflow/core/grappler/costs:op_performance_data_cc")],
)
def tf_additional_device_tracer_srcs():
return ["device_tracer.cc"]
def tf_additional_cupti_utils_cuda_deps():
return []
def tf_additional_cupti_test_flags():
return []
def tf_additional_test_deps():
return []
def tf_additional_test_srcs():
return [
"default/test.cc",
"default/test_benchmark.cc",
]
def tf_kernel_tests_linkstatic():
return 0
def tf_additional_lib_deps():
"""Additional dependencies needed to build TF libraries."""
return [
"@com_google_absl//absl/base:base",
"@com_google_absl//absl/container:inlined_vector",
"@com_google_absl//absl/types:span",
"@com_google_absl//absl/types:optional",
] + if_static(
[clean_dep("@nsync//:nsync_cpp")],
[clean_dep("@nsync//:nsync_headers")],
)
def tf_additional_core_deps():
return select({
clean_dep("//tensorflow:android"): [],
clean_dep("//tensorflow:ios"): [],
clean_dep("//tensorflow:linux_s390x"): [],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:no_gcp_support"): [],
"//conditions:default": [
"//tensorflow/core/platform/cloud:gcs_file_system",
],
}) + select({
clean_dep("//tensorflow:android"): [],
clean_dep("//tensorflow:ios"): [],
clean_dep("//tensorflow:linux_s390x"): [],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:no_hdfs_support"): [],
"//conditions:default": [
clean_dep("//tensorflow/core/platform/hadoop:hadoop_file_system"),
],
}) + select({
clean_dep("//tensorflow:android"): [],
clean_dep("//tensorflow:ios"): [],
clean_dep("//tensorflow:linux_s390x"): [],
clean_dep("//tensorflow:windows"): [],
clean_dep("//tensorflow:no_aws_support"): [],
"//conditions:default": [
clean_dep("//tensorflow/core/platform/s3:s3_file_system"),
],
})
def tf_lib_proto_parsing_deps():
return [
":protos_all_cc",
clean_dep("//third_party/eigen3"),
clean_dep("//tensorflow/core/platform/default/build_config:proto_parsing"),
]
def tf_py_clif_cc(name, visibility = None, **kwargs):
pass
def tf_pyclif_proto_library(
name,
proto_lib,
proto_srcfile = "",
visibility = None,
**kwargs):
native.filegroup(name = name)
native.filegroup(name = name + "_pb2")
def tf_additional_binary_deps():
return [clean_dep("@nsync//:nsync_cpp")] + if_cuda(
[
clean_dep("//tensorflow/stream_executor:cuda_platform"),
],
) + if_rocm(
[
clean_dep("//tensorflow/stream_executor:rocm_platform"),
clean_dep("//tensorflow/core/platform/default/build_config:rocm"),
],
) + [
# TODO(allenl): Split these out into their own shared objects (they are
# here because they are shared between contrib/ op shared objects and
# core).
clean_dep("//tensorflow/core/kernels:lookup_util"),
clean_dep("//tensorflow/core/util/tensor_bundle"),
] + if_mkl_ml(
[
clean_dep("//third_party/mkl:intel_binary_blob"),
],
)
def tf_additional_rpc_deps():
return []
def tf_additional_tensor_coding_deps():
return []
def tf_fingerprint_deps():
return [
"@farmhash_archive//:farmhash",
]
def tf_protobuf_deps():
return if_static(
[
clean_dep("@com_google_protobuf//:protobuf"),
],
otherwise = [clean_dep("@com_google_protobuf//:protobuf_headers")],
)
def tf_protobuf_compiler_deps():
return if_static(
[
clean_dep("@com_google_protobuf//:protobuf"),
],
otherwise = [clean_dep("@com_google_protobuf//:protobuf_headers")],
)
| 33.179837
| 111
| 0.612795
|
5822ff7a0b5e74cb4dda79bc0e44e7d8f2d9bdd7
| 284
|
py
|
Python
|
admin/actions/deploy/samples/echobody.py
|
sciabarra/io-sdk
|
4fa4e3dbf56a653162730ccf6b74845b97b915e4
|
[
"MIT"
] | 6
|
2020-06-16T06:46:15.000Z
|
2020-07-26T21:44:40.000Z
|
admin/actions/deploy/samples/echobody.py
|
sciabarra/io-sdk
|
4fa4e3dbf56a653162730ccf6b74845b97b915e4
|
[
"MIT"
] | 32
|
2020-06-15T07:18:03.000Z
|
2020-11-28T19:17:36.000Z
|
admin/actions/deploy/samples/echobody.py
|
sciabarra/io-sdk
|
4fa4e3dbf56a653162730ccf6b74845b97b915e4
|
[
"MIT"
] | 18
|
2020-06-15T12:22:05.000Z
|
2020-11-28T19:14:16.000Z
|
import base64
import time
import os
import json
import pip
def main(args):
body = args["__ow_body"]
if args["__ow_headers"]["content-type"] == "application/json":
body = base64.b64decode(body).decode("utf-8")
body = json.loads(body)
return { "body": body }
| 20.285714
| 66
| 0.651408
|
8a0e509e664833213b66e14d68c19bc6e68361ce
| 4,781
|
py
|
Python
|
2015/advent22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 96
|
2018-04-21T07:53:34.000Z
|
2022-03-15T11:00:02.000Z
|
2015/advent22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 17
|
2019-02-07T05:14:47.000Z
|
2021-12-27T12:11:04.000Z
|
2015/advent22.py
|
AwesomeGitHubRepos/adventofcode
|
84ba7963a5d7905973f14bb1c2e3a59165f8b398
|
[
"MIT"
] | 14
|
2019-02-05T06:34:15.000Z
|
2022-01-24T17:35:00.000Z
|
from collections import namedtuple
from functools import reduce
from heapq import heappop, heappush
from itertools import count
class Spell(namedtuple('BaseSpell',
'name cost effect turns damage heal armour mana')):
def __new__(cls, name, cost, effect=False, turns=None, damage=0, heal=0,
armour=0, mana=0):
return super().__new__(
cls, name, cost, effect, turns, damage, heal, armour, mana)
spells = (
Spell('Magic Missile', 53, damage=4),
Spell('Drain', 73, damage=2, heal=2),
Spell('Shield', 113, effect=True, turns=6, armour=7),
Spell('Poison', 173, effect=True, turns=6, damage=3),
Spell('Recharge', 229, effect=True, turns=5, mana=101),
)
class State(object):
def __init__(self, hp, mana, boss_hp, boss_damage,
mana_spent=0, effects=None, hard=False,
parent=None, spell_cast=None):
self.hp = hp
self.mana = mana
self.boss_hp = boss_hp
self.boss_damage = boss_damage
self.mana_spent = mana_spent
self.effects = effects or ()
self.hard = hard
self._parent = parent
self._spell_cast = spell_cast
def __eq__(self, other):
if not isinstance(other, State):
return NotImplemented
return all(getattr(self, k) == getattr(other, k)
for k in vars(self) if k[0] != '_')
def __hash__(self):
return reduce(lambda a, b: a ^ hash(b),
(v for k, v in vars(self).items() if k[0] != '_'), 0)
def iter_path(self):
if self._parent is None:
return
yield from self._parent.iter_path()
yield self._spell_cast
def process_effects(self, effects, hp, mana, boss_hp):
remaining_effects = []
armour = 0 # either Shield is in effect or it is not
for timer, effect in self.effects:
hp += effect.heal
mana += effect.mana
boss_hp -= effect.damage
armour = max(armour, effect.armour)
if timer > 1:
remaining_effects.append((timer - 1, effect))
return tuple(remaining_effects), hp, mana, boss_hp, armour
def boss_turn(self):
self.effects, self.hp, self.mana, self.boss_hp, armour = (
self.process_effects(
self.effects, self.hp, self.mana, self.boss_hp))
# only if the boss is still alive can they attack!
if self.boss_hp > 0:
self.hp -= max(1, self.boss_damage - armour)
def transitions(self):
# Player turn first
effects, hp, mana, boss_hp, __ = self.process_effects(
self.effects, self.hp - int(self.hard), self.mana, self.boss_hp)
for spell in spells:
if spell.cost > mana or any(spell is s for t, s in effects):
# can't cast spells for which we have no mana or in effect
continue
new_state = State(
hp, mana - spell.cost, boss_hp, self.boss_damage,
self.mana_spent + spell.cost, effects, hard=self.hard,
parent=self, spell_cast=spell.name)
if not spell.effect:
new_state.hp += spell.heal
new_state.boss_hp -= spell.damage
else:
new_state.effects = new_state.effects + ((spell.turns, spell),)
# Boss turn next
new_state.boss_turn()
# No point in playing a turn that has the player losing
if new_state.hp > 0:
yield new_state
def search_a_star(start):
open_states = {start}
pqueue = [(0, start)]
closed_states = set()
unique = count()
while open_states:
current = heappop(pqueue)[-1]
if current.boss_hp < 1:
return current
open_states.remove(current)
closed_states.add(current)
for state in current.transitions():
if state in closed_states or state in open_states:
continue
open_states.add(state)
heappush(pqueue, (state.mana_spent, next(unique), state))
if __name__ == '__main__':
import sys
filename = sys.argv[-1]
with open(filename) as f:
boss_hp = int(next(f).rpartition(':')[-1])
boss_attack = int(next(f).rpartition(':')[-1])
player_hp, player_mana = 50, 500
start = State(player_hp, player_mana, boss_hp, boss_attack)
end = search_a_star(start)
print('Part 1:', end.mana_spent)
if '-v' in sys.argv:
print(*end.iter_path(), sep=' -> ')
start.hard = True
end = search_a_star(start)
print('Part 2:', end.mana_spent)
if '-v' in sys.argv:
print(*end.iter_path(), sep=' -> ')
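# Usage sketch (hypothetical filename; the input format is inferred from the
# parsing above: two lines of the form "Hit Points: <n>" and "Damage: <n>"):
#
#     python advent22.py input22.txt        # prints the part 1 and part 2 answers
#     python advent22.py -v input22.txt     # also prints the winning spell sequences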
| 35.414815
| 79
| 0.576239
|
256bcb3494fa644476975dcb3f3e4c5a6f9e7116
| 753
|
py
|
Python
|
fairseq/criterions/__init__.py
|
atliSig/entropyRegularization
|
34a127cef30237a6b72d7b5b33d5f8f263904fc0
|
[
"MIT"
] | 8
|
2020-05-03T19:20:00.000Z
|
2021-04-21T06:38:53.000Z
|
fairseq/criterions/__init__.py
|
atliSig/entropyRegularization
|
34a127cef30237a6b72d7b5b33d5f8f263904fc0
|
[
"MIT"
] | null | null | null |
fairseq/criterions/__init__.py
|
atliSig/entropyRegularization
|
34a127cef30237a6b72d7b5b33d5f8f263904fc0
|
[
"MIT"
] | 1
|
2020-10-29T15:32:38.000Z
|
2020-10-29T15:32:38.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from fairseq import registry
from fairseq.criterions.fairseq_criterion import FairseqCriterion
build_criterion, register_criterion, CRITERION_REGISTRY = registry.setup_registry(
'--criterion',
base_class=FairseqCriterion,
default='cross_entropy',
)
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.criterions.' + module)
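# A sketch of how a new criterion plugs into this registry (hypothetical name; the
# decorator and base class come from the imports above):
#
#     @register_criterion('my_smoothed_cross_entropy')
#     class MySmoothedCrossEntropy(FairseqCriterion):
#         def forward(self, model, sample, reduce=True):
#             ...
#
# Any module dropped into this directory is picked up by the import loop above and
# becomes selectable on the command line via "--criterion my_smoothed_cross_entropy".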
| 31.375
| 82
| 0.754316
|
6680e2d6f42b8aef1c1ac53f584946e28c7b07bb
| 1,033
|
py
|
Python
|
discogs2xlsx/__init__.py
|
fscm/discogs2xlsx
|
b478c78a61cc90ef981b1c9372dabe42538e78aa
|
[
"MIT"
] | 5
|
2021-02-18T18:21:38.000Z
|
2021-11-08T11:42:32.000Z
|
discogs2xlsx/__init__.py
|
fscm/discogs2xlsx
|
b478c78a61cc90ef981b1c9372dabe42538e78aa
|
[
"MIT"
] | null | null | null |
discogs2xlsx/__init__.py
|
fscm/discogs2xlsx
|
b478c78a61cc90ef981b1c9372dabe42538e78aa
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
#
# copyright: 2020-2021, Frederico Martins
# author: Frederico Martins <http://github.com/fscm>
# license: SPDX-License-Identifier: MIT
"""discogs2xlsx.
Export your Discogs collection or wantlist into a xlsx file.
This tool will try to export your collection or wantlist from Discogs
into a `.xlsx` file.
.. note::
The time required to perform the export will depend on the size of
your collection, or wantlist.
Discogs requests to the API are throttled to 60 per minute for
authenticated requests, for that reason for large collections, or
wantlists, the export can take hours to perform.
A simple example of how to use this tool::
discogs2xlsx -a my_discogs_secret_token
"""
from typing import Final
__author__: Final[str] = 'Frederico Martins'
__license__: Final[str] = 'MIT'
__project__: Final[str] = __package__
__version__: Final[str] = '0.3.0'
DEFAULT_FILE_COLLECTION: Final[str] = 'discogs-collection.xlsx'
DEFAULT_FILE_WANTLIST: Final[str] = 'discogs-wantlist.xlsx'
| 29.514286
| 70
| 0.745402
|
a3bbbc82f2b3b34be50a47c2f3d74b98e012ec04
| 805
|
py
|
Python
|
daiquiri/jobs/migrations/0010_add_fields.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 14
|
2018-12-23T18:35:02.000Z
|
2021-12-15T04:55:12.000Z
|
daiquiri/jobs/migrations/0010_add_fields.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 40
|
2018-12-20T12:44:05.000Z
|
2022-03-21T11:35:20.000Z
|
daiquiri/jobs/migrations/0010_add_fields.py
|
agy-why/daiquiri
|
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
|
[
"Apache-2.0"
] | 5
|
2019-05-16T08:03:35.000Z
|
2021-08-23T20:03:11.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-24 15:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('daiquiri_jobs', '0009_meta'),
]
operations = [
migrations.AddField(
model_name='job',
name='max_records',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='job',
name='response_format',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='job',
name='run_id',
field=models.CharField(blank=True, max_length=64, null=True),
),
]
| 25.967742
| 73
| 0.581366
|
f58902e03e46284be99d9528a3e4d6e87de13bdb
| 25,864
|
py
|
Python
|
interface_grafica/doc_qrc.py
|
GTL98/Exon-Finder
|
b27501207338c728d0cccfed64cd886765bb96b4
|
[
"MIT"
] | null | null | null |
interface_grafica/doc_qrc.py
|
GTL98/Exon-Finder
|
b27501207338c728d0cccfed64cd886765bb96b4
|
[
"MIT"
] | null | null | null |
interface_grafica/doc_qrc.py
|
GTL98/Exon-Finder
|
b27501207338c728d0cccfed64cd886765bb96b4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x10\x08\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3\x3e\x61\xcb\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x03\xb1\x00\x00\x03\xb1\
\x01\xf5\x83\xed\x49\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x0f\x85\x49\x44\
\x41\x54\x78\x9c\xed\x9d\x79\x90\x55\xc5\x15\xc6\x7f\x03\x33\xc2\
\x08\x08\x38\x2c\x0a\x2a\x0c\x2a\x51\x51\x06\x63\xdc\x80\x18\x35\
\x01\x2d\xd7\xb8\xc4\x2d\x2e\x95\xc4\x32\x9a\x94\xc1\x24\xa0\x31\
\x96\x89\xd1\x44\xa9\x88\x6b\xb9\x9b\x18\x24\x6a\x5c\x22\x51\x41\
\xc5\x18\x17\x40\x4b\x1c\x44\xc1\x48\xc0\xb8\xa0\xa0\x20\x10\x71\
\x90\x1d\x66\xde\xe4\x8f\xf3\x9e\x0c\x6f\xde\xed\xe5\xbe\xdb\xdd\
\xf7\xbd\x99\xaf\xea\xd4\xa0\xf7\xbe\xdb\x5f\xf7\xed\xdb\x7d\xfa\
\xf4\xe9\x73\xa0\x3c\xd1\x00\x34\x47\xc8\x2d\x01\x79\x85\xc0\xcd\
\x44\xb7\x45\x43\x87\x80\xc4\x5c\xe2\x4b\xc5\xb5\x1d\xbd\xb1\x48\
\x07\x54\xf5\x5d\x5d\xae\x1d\xa0\x41\x71\x6d\x27\x6f\x2c\xd2\x81\
\xbe\x8a\x6b\x65\xdb\x01\x96\x28\xae\x1d\x00\x54\xf8\x22\x12\x18\
\x15\x48\x7d\xa3\xb0\xa4\xd2\x17\x13\xcf\x98\x07\x1c\x13\x71\xad\
\x27\x70\x16\xb0\xdc\x1f\x9d\x60\xe8\x03\xd4\x28\xae\xcf\x2b\xe7\
\x0e\xa0\xc2\x03\x5e\x58\xa4\x1f\x73\xcb\x75\x28\xec\x87\x4c\x03\
\xe5\x3a\xc5\x25\x81\x0c\xb0\x6b\xb9\x36\xd0\x52\x60\x56\x68\x12\
\x29\xc7\x6b\xc0\xd2\x72\xed\x00\x00\x93\x43\x13\x48\x39\xfe\x01\
\xe5\xad\x0d\xd7\x00\x1f\x03\x5d\x42\x13\x49\x21\xd6\x03\x03\x81\
\x95\xe5\x3c\x02\x7c\x0e\x4c\x0c\x4d\x22\xa5\xb8\x0f\x58\x19\x9a\
\x84\x0f\xd4\x02\x9b\x88\x36\x85\xb6\x45\xd9\x9c\x6d\x17\x00\xca\
\x75\x19\x98\xc3\x22\x60\x02\xf0\x6b\xcd\x7d\xcb\x81\x0b\x81\xb5\
\xce\x19\xb9\x43\x17\xe0\x2e\xf4\x96\xce\xeb\x91\x76\x69\x33\xa8\
\x06\xde\x47\xff\x65\xbc\x02\x74\x0b\xc4\xb1\x58\x74\x01\x5e\x44\
\x5f\xc7\xf7\x90\xf6\x68\x73\x38\x02\x68\xc4\xac\x13\xa8\x2c\x67\
\x69\x44\x0d\x30\x13\x7d\xdd\x1a\x81\xc3\xc3\x50\x4c\x07\x2e\xc5\
\x6c\x8e\x7c\x1f\x18\x12\x88\xa3\x2d\x06\x03\x0b\x31\xab\xd7\xd8\
\x40\x1c\x53\x83\x0a\xe0\x51\xcc\x1a\x6b\x35\x70\x62\x18\x9a\xc6\
\x38\x11\xe1\x69\x52\x9f\x47\x28\xef\x25\xbf\x31\xaa\x81\x17\x30\
\xd7\x98\x27\x01\x5d\x83\x30\x8d\x46\x35\xe2\xd4\x92\xc1\xac\x0e\
\x33\x69\xb7\x85\x6c\x83\x2e\x98\xcd\x99\x39\x79\x17\x38\x24\x08\
\xd3\xd6\x38\x14\xe1\x63\xca\xbd\xfd\xe5\x47\x60\x07\x60\x3a\xe6\
\x0d\xd9\x04\xdc\x01\xf4\x08\x41\x36\x5b\xee\x1d\x59\x1e\xa6\x9c\
\xa7\x53\xba\xab\x1a\x2f\xe8\x84\x6c\x0b\x9b\x36\x68\x33\xb0\x0c\
\x38\x1b\x7f\xbb\x8c\x1d\xb2\xe5\x2d\xb3\xe4\xf9\x00\x52\xbf\x76\
\x68\x50\x01\x5c\x85\xf9\x7c\x9a\x93\x37\x81\xd1\x8e\xb9\x8d\xce\
\x96\x63\xc3\x2b\x03\xfc\x8e\x76\x85\xcf\x1a\xa3\x90\x2d\x64\x9b\
\xc6\x6e\x06\xfe\x85\xda\xe5\x2a\x0e\xf6\x05\xa6\xc6\xe0\xb2\x12\
\x38\x3e\x61\x2e\x6d\x0a\x7d\x80\x67\xb0\x6f\xf8\x0c\xb2\xbc\xdc\
\xa3\xc8\xf2\x07\x00\x77\x63\x37\xcf\xe7\xe4\x05\xc4\x09\xa6\x1d\
\x45\xa2\x03\x30\x0e\xd8\x80\xfd\x4b\xd8\x04\xdc\x89\x6c\xb3\xda\
\x60\x60\xf6\x77\x71\x36\xad\x36\x64\xf9\x96\xf3\xae\x6e\x10\xec\
\x0e\x4c\xc3\xfe\x85\x34\x23\xbb\x6d\x93\x80\xaf\x69\xca\xa8\x45\
\xd6\xf3\x1b\x63\x96\xf3\x32\xb0\x57\xf1\x55\x6d\x87\x0a\xc7\x03\
\x9f\x10\xef\x05\x35\x01\x53\x80\xfd\xf3\x9e\xb9\x0f\xd2\x41\xb6\
\xc4\x7c\xee\x67\xc0\xb9\xb4\x2b\x7a\xde\xd0\x13\xb8\x89\xf8\x7e\
\x05\x4d\xc0\x63\xc0\x09\xd9\xbf\x71\xe6\xf8\xdc\x14\x73\x53\x96\
\x4f\x3b\x02\x60\x37\xe4\xcb\xb5\x5d\x32\x26\x21\x53\x28\x5e\xc9\
\x6c\x47\x42\x38\x04\x98\x81\x9f\x17\x3f\x83\xf4\x98\xa1\xdb\x91\
\x87\x91\x88\x22\xe6\xe2\xc5\xcf\xc2\xd3\x9a\xbe\x1c\x14\x89\x6a\
\x60\x28\x30\x2c\xfb\x77\x57\xa0\x3f\x62\x3f\xcf\xcd\x97\x5b\x80\
\x55\x59\x59\x0a\x2c\x00\xfe\x03\xcc\x06\x3e\x28\xb2\xfc\x63\x10\
\xeb\xdb\x37\x8a\x7c\x0e\xc0\x1c\xe0\xb7\xc0\xd3\x09\x3c\xcb\x08\
\xa5\xea\x13\x58\x0b\x9c\x04\x1c\x0d\x1c\x86\x99\xdd\xbb\x4f\xc4\
\xff\x5f\x02\xbc\x84\xf8\xc9\x3f\x8b\x28\x5b\xa6\xa8\x40\x3a\x60\
\x52\x3b\x6e\x5d\x81\xee\xc8\xba\x3e\x93\xd0\x33\xcb\x06\x9d\x80\
\x73\x10\xdf\x37\x57\x8a\xd8\x17\xc0\x3d\x88\x39\x56\x87\xd1\xc8\
\x08\xe2\x82\xc7\x7c\xe0\x34\xda\x0d\x3c\x80\x7c\x61\x17\x23\x5f\
\xaa\x2f\xad\x3b\x83\x0c\xc3\xc3\x0b\xf0\x19\x8e\xbb\xb9\x3f\x5f\
\xe6\x21\xa3\x5c\x9b\xc5\x69\xf8\x7d\xf1\x85\x3a\xc2\x23\x88\xb9\
\xb6\x0e\x59\x8e\x85\xe0\xf1\xcf\x6c\xf9\x6d\x06\xb5\xc8\x4e\x5b\
\xa8\x17\x9f\x2f\x1b\x29\xce\x80\x73\x2f\xa2\xab\xdc\x4b\x71\x06\
\xa5\xbf\x00\x3b\x17\xd1\xae\x25\x81\x93\x10\x6d\x3d\xf4\x4b\x4f\
\xa2\xd3\xdc\x8d\xac\x4a\x5a\xa2\x2f\x30\x1e\x58\x17\xf3\xb9\x0d\
\xc0\x18\xa0\xa3\x65\xbb\x16\x44\x9a\x96\x81\x1d\x11\x53\xe7\xc5\
\x45\x3c\xa3\x19\x58\x8c\xf8\xcd\x35\x64\xa5\x09\x59\xed\xd4\x20\
\xcb\xc3\x41\x40\xef\xa2\x98\xaa\xb1\x16\xb8\x0d\xa9\xcb\x0a\xc5\
\x7d\x3b\x01\xbf\x44\x4e\x24\xc5\x71\x3c\xad\xcf\xfe\xf6\xad\x18\
\xbf\x4d\x1d\xb6\x07\x9e\xc0\xfe\x6b\xc8\x20\x46\x93\xab\x91\x21\
\x76\x7b\xc3\xf2\x76\x05\xbe\x87\x0c\xc9\x2b\x62\x94\x5b\x48\xd6\
\x21\xc7\xd0\x6c\x3b\x57\x0d\x70\x1d\xf1\x46\x84\x2d\xc0\xb5\x40\
\x95\x65\x99\xa9\x42\x57\xe0\x55\xec\x2a\xbe\x02\x19\x46\x07\x27\
\x50\x7e\x47\xc4\x98\x33\x85\x78\xf3\xfc\x46\xe0\x56\x8a\x9f\x9b\
\x77\x06\x6e\x47\xb6\x93\x6d\x39\xcc\x42\x46\xb6\x92\x43\x27\xe0\
\x79\xcc\x2b\xfa\x39\x70\x39\xee\x7c\xf5\x87\x20\x81\x25\x6c\xec\
\x0c\x97\x25\xcc\x61\x77\xc4\xa9\xd3\xb6\x33\xae\x46\x82\x5f\x95\
\x0c\x3a\x00\x8f\x63\x5e\xc1\xfb\xf1\xb7\x15\x7a\x24\x66\x07\x4a\
\x9b\x91\x2f\xf6\x28\x07\x1c\x86\x22\x96\x49\xdb\xd1\xe0\x66\x12\
\x52\x10\x5d\xe3\x37\x98\x55\x28\x94\xb3\x63\x35\xa2\x23\x98\x70\
\xfc\x12\x77\xde\x39\xc7\x63\xde\x19\x73\xf2\x24\x29\x3f\x10\x72\
\x34\x66\x43\xdc\x7c\x64\x48\x0c\x89\x1f\x61\x36\x2f\xcf\xc3\xdd\
\xd1\xeb\xce\xc8\x86\x93\x8d\x7e\xf0\x06\x29\x3d\xe9\x5c\x83\x99\
\xe6\x3d\x03\xd9\x18\x49\x03\x8e\xc5\xcc\x51\xf4\x36\xc7\x3c\x86\
\x22\x2f\xd6\xb4\x13\xcc\x21\xdc\x69\xa6\x48\x4c\x44\x4f\xbc\x1e\
\x39\xbe\x95\x26\x1c\x89\xde\x81\xb3\x09\xf7\x0e\x1c\x95\xc0\xef\
\x31\x57\x12\x5f\x27\x45\x6d\x79\x04\x7a\x0d\x7b\x21\xe9\x8d\xe8\
\x7d\x16\x7a\xfe\xf3\xf0\xa3\x84\x7d\x07\x71\x10\x35\xe9\x04\x4f\
\x93\x82\x9d\xc5\x0a\xe4\xcb\x56\x11\x5d\x43\xfa\x83\x33\x5c\x83\
\xbe\xc1\xcf\xf3\xc4\x65\x17\x60\xae\x01\x9f\x66\xc4\x58\xd6\x0a\
\x3e\x4d\xc1\x27\x22\xd6\x3e\x15\xce\x21\xfd\x71\x7c\x2b\x11\xc3\
\xd5\x41\x8a\x7b\x16\x01\x07\x23\x61\x59\x5c\xa3\x1b\xf0\x10\x30\
\x42\x73\x5f\x33\x70\x1c\x72\xfa\x29\x08\x74\xca\xcb\xb3\x09\x95\
\xf3\x13\xc4\xed\x2b\x4a\x92\xd0\xd4\x07\x13\xff\x40\x47\x48\x59\
\x4c\xa0\x23\xe3\x07\x6a\x88\x6d\x20\x39\x53\xa6\x2e\x16\x50\x52\
\xeb\xe3\x09\x9a\x72\xd2\x2a\x37\x27\x54\x7f\x2b\xdc\xa3\x21\x95\
\xe4\xf2\xc9\x57\x07\xe8\x41\x69\x6e\x5b\x37\xd2\xe2\xd8\x9a\x0f\
\xcd\x70\x3b\xe0\x74\xc5\xf5\xcd\xc0\x1f\x3d\xf0\x48\x1a\x0d\x48\
\xc7\x2e\x35\x74\x44\x0e\x94\x7a\xc3\x28\xd4\x3d\xf2\xd1\x84\xcb\
\xf3\x35\x02\x80\x68\xe1\x71\xcf\xf8\x85\x94\x4d\x88\x6f\x84\x97\
\x11\x40\x67\xc7\xbf\xdf\x03\x07\x57\xf8\x04\x39\x9b\x5f\x6a\xd8\
\x0e\x39\x60\xea\xa5\x03\x1c\xae\xb8\xf6\x05\xf0\x9c\x07\x0e\x2e\
\xf1\xf7\xd0\x04\x62\xe2\x64\x70\x7f\x30\xa4\x1a\xd8\x5b\x71\xfd\
\x25\xfc\xac\x95\x5d\x42\x77\x8a\xa7\x1e\xf8\xb3\x0f\x22\x05\x30\
\x06\x39\x8e\x5e\x08\x07\x00\x03\x5c\x77\x80\x61\xa8\x3b\x59\x29\
\x0e\x9f\xf9\x58\x06\x7c\x48\xf4\x32\xb6\x91\x70\xca\x62\x35\xd1\
\xcb\xbe\x0a\x60\x84\xeb\x29\x40\xe7\xb2\x35\xd7\x71\xf9\xbe\xf0\
\xba\xe2\x5a\x12\x6e\x6b\x71\x31\x45\x73\xbd\xce\xf5\x08\xb0\x8b\
\xe6\xfa\x02\x8b\x67\x5d\x0a\x7c\xcb\xe0\x3e\x9d\x41\xe9\x71\x64\
\x27\x4d\x87\x71\xc8\x01\x52\x13\xfc\x57\x71\xad\x17\xb2\xb9\xb5\
\xca\xf0\x59\x49\xe2\x43\x44\x51\x8d\x7a\x0f\xc3\x5c\x77\x80\xfe\
\x8a\x6b\x0d\x88\x12\x68\x8a\xfd\x89\x4e\x06\x69\x03\x53\xf7\xad\
\xeb\x2c\x9e\xf9\x91\xe6\x7a\x0d\x61\x3a\x00\x88\x53\x4d\x54\x07\
\xe8\xe7\x7a\x0a\x50\xed\x43\xdb\xbc\xfc\xb4\x43\xe5\xff\x0f\x61\
\x43\xb6\x2e\x56\x5c\xeb\xe1\xba\x03\xa8\xfc\xd5\x55\x19\xbe\x4b\
\x0d\xba\x23\xe5\x21\x1d\x32\x54\xed\xdc\xdd\x75\x07\x50\x4d\x31\
\xa5\x1a\x9b\xa0\x10\x74\x4b\xd9\x66\x2f\x2c\x0a\x43\xf5\x8e\x33\
\xae\x3b\xc0\x06\xc5\xb5\xce\x8e\xcb\xf6\x09\xdd\x10\x1f\x32\x19\
\x95\x6a\xf4\x71\x9e\x3e\x5e\xa5\xf8\xf4\x72\x5c\xb6\x4f\xe8\x1c\
\x2f\x43\x76\x80\x5a\xc5\xb5\x06\xd7\xc3\xb0\xaa\x03\x74\x47\x1a\
\xae\xc1\xf0\x59\x1f\x22\xd1\xb3\x75\xe8\x8b\x7a\xf5\x31\x17\xb3\
\xf0\x2b\x36\x2f\x6d\x80\xe6\xfa\xe7\x16\xcf\x4a\x12\x15\xa8\xa3\
\x9d\xa8\x14\xc4\x44\x70\x1e\xea\x5d\xa9\x24\x02\x2b\xe5\xc3\xe7\
\x6e\x60\x0e\x0f\x2a\xca\x0b\xb9\xda\xd9\x47\xc1\xab\x19\xb8\xc6\
\xf5\x14\xb0\x50\x73\x5d\xe5\x57\x57\x4a\x50\x45\xef\x78\xcf\x1b\
\x8b\xd6\xf8\xae\xe6\xfa\x3c\xd7\x53\xc0\x02\xa4\xa7\x45\x39\x9f\
\x0e\x47\x52\xa1\x94\x32\xfa\x12\xbd\xe1\x02\x32\xcd\x8d\xf7\xc4\
\x25\x1f\x67\x28\xae\x35\x21\x87\x6f\x9c\xe3\x6d\xa2\x87\xa0\x15\
\x24\xbf\x1c\xf4\x3d\x05\x9c\xa9\x29\x2f\xad\xf2\x32\xf8\xf1\x07\
\x78\x59\x71\xad\x37\x66\xf6\xfd\x34\x43\xe5\xee\x96\x66\x4c\x06\
\x3f\x1d\xe0\x45\xcd\xf5\x73\x3c\x70\x70\x85\xde\x24\xb3\x3f\xe1\
\x1b\x6b\xc9\x9e\xbf\xf0\xd1\x01\xa6\xa1\x36\x47\x9e\x81\x3e\xe3\
\x75\x5a\x71\x01\xa5\x19\x9e\xe5\x1e\xb2\x4b\x74\x1f\x1d\x60\x23\
\xd9\xe1\x26\x02\x9d\x80\x9f\x79\xe0\x91\x34\xba\x02\x97\x84\x26\
\x11\x03\x1b\x91\x00\x56\x80\xbf\x03\x83\x93\x34\xd7\x2f\xa1\x75\
\x38\xb5\xb4\x63\x0c\xa5\x69\xcd\xfc\x03\xe2\x23\xe0\x1d\x73\x50\
\x6b\xa5\x0f\x26\x54\xce\x38\xc4\xd2\x17\x25\xa6\x91\xc4\x54\x18\
\x04\xac\x27\xbc\x26\x6f\x2b\xf3\x11\x8f\xe0\x20\x38\xcb\x80\xe0\
\xc9\xa1\xc8\x59\xa0\x02\xf1\x64\x56\xd5\x63\x19\xc9\x74\x34\x13\
\xf4\xc3\x2c\x68\xc4\x26\xe4\xc0\x6a\x30\x54\x02\xef\xa0\x26\xb9\
\x12\xb5\x1d\x3f\x0d\xb8\x1c\x7d\x63\x8f\xf1\xc4\xa5\x0e\xb1\xe7\
\x9b\x7c\xfd\x17\x79\xe2\xa4\xc4\x51\xe8\x89\xce\x21\x7d\x29\xdb\
\x73\x18\x85\xec\xfd\xab\xf8\xbf\x8b\x9f\xbc\xbd\x3f\xc0\x3c\xb8\
\xe4\x7d\x1e\xf8\x18\xc3\x24\xe2\xf6\x54\x02\xce\x55\x11\x38\x14\
\x09\x60\xa1\xe3\xee\x3a\xbc\xfb\x0e\xc0\x5f\x0d\x78\xe4\x64\x0a\
\x29\x6b\xcb\xfe\xc8\xf6\xa8\x8e\xf8\xd3\xb8\x8b\xba\x65\x8b\x83\
\x91\x5d\x3d\x1d\xe7\x89\x8e\x79\x1c\x0d\x7c\x6c\xc0\x23\x27\xd3\
\x48\x69\x16\xf1\xd3\x30\xab\xc0\x74\xdc\x06\x76\x36\xc1\xc9\x98\
\x0d\xb5\x8b\x70\xe7\xfb\xd7\x1b\x39\x43\x69\xfa\xe2\xd3\xf6\x01\
\x15\xc4\xed\x98\x55\x64\x09\x32\xfc\xfa\x46\x15\xb2\x66\x36\x89\
\xc6\xb5\x1e\xf8\xba\x03\x0e\x95\x48\xf4\x74\xdb\x38\x04\x77\x50\
\x02\x3e\x97\x95\xe8\x97\x53\x39\xc9\xc5\x10\xf0\x15\xfd\xb2\x0e\
\xf3\x78\x7c\x19\xe0\xfb\x0e\x38\x1c\x8b\x7a\x27\xb5\x90\x34\x22\
\x21\xe8\x4b\x06\x03\xd1\x6b\xd5\x2d\xe5\x23\xc4\xcb\xc8\x55\xef\
\xee\x07\xfc\xc9\x92\xd3\x5d\x09\x73\x18\x09\xcc\xb4\x28\xbf\xe5\
\x48\x79\x78\xc2\x5c\x9c\xa2\x13\xf1\x93\x2f\x7d\x80\xec\x1f\x24\
\x65\x8a\x3d\x08\x31\x57\xc7\x49\xe7\x92\x41\x52\xce\x0d\x2d\x92\
\x43\x1d\xb2\xfa\x89\xd3\x1e\x93\x49\x49\x6c\x45\xd3\xb0\x73\x15\
\x48\x68\xb3\x38\x95\x6d\x29\x9b\x90\xc0\xc8\x17\xa1\x4f\xd7\xde\
\x12\xdd\x90\x40\x8b\xe3\xb1\x0f\xc4\x1c\x25\x4d\x48\xb4\x13\xdb\
\x38\x87\x7b\x21\x6d\x11\x27\x57\xc1\x97\xc0\x8f\x2d\xcb\xdb\x06\
\x49\xc6\x09\xac\x44\xe2\x00\x4e\x45\x3f\x2c\x8e\x27\xf9\x38\xfb\
\x20\x31\xf3\x17\xb0\x6d\xca\x98\x66\xb6\x26\x77\xac\x45\x82\x4f\
\xef\x89\xbb\x68\x9e\x19\xa4\x23\x5c\x0b\xfc\x5b\x71\xdf\x50\xe0\
\x0a\xe0\x54\xe2\x6d\xca\x4d\x46\x46\xc1\x4f\x63\xfc\xd6\x09\x6e\
\x65\x6b\xcf\xbc\x85\xe8\x06\xbe\x80\x64\xbe\xb8\xb4\x4b\x06\x31\
\xc2\xe4\xdb\xdf\xeb\x90\x0e\x12\x37\xf9\xe5\x27\xc0\x29\x11\x6d\
\x1b\x0c\x17\xd2\x9a\xe8\x13\xb4\xd6\xda\x4f\xc0\x4c\xc1\x9a\x86\
\x9c\x01\x08\xfd\x12\x73\xd2\x84\x59\xb4\x70\x55\x7d\xce\xce\xfe\
\x8d\xfb\x8c\xb5\x48\xc8\xf8\xd4\xe5\x01\xf8\x36\xd1\x71\xec\xdf\
\x40\x34\x6b\x10\x45\xcb\xc4\xa0\xf2\x1a\x32\x64\x77\x44\xfc\x04\
\x4c\x2c\x70\x2e\x65\x26\x72\x34\xbd\x3f\x32\xb5\xc5\xc9\xe9\x53\
\x8c\x34\x22\x89\x2b\x72\xed\x98\x2a\x0c\x46\x6f\xa4\x58\x8c\xf8\
\xa7\x2f\xd7\xdc\xd7\x8c\xf8\xd0\xe7\x5b\xfe\x7a\x22\xf3\xa9\x89\
\x1d\x3e\x49\x99\x8d\x8c\x58\xf9\x7a\x52\xdc\x9c\x3e\x71\x64\x0a\
\x66\x79\x8c\x83\xa0\x27\xa2\x6c\x25\x55\xd9\x15\xc0\x1e\x8a\xf2\
\xba\x03\x3f\x47\xa2\x71\xb8\x6a\xf0\xcd\xc0\x63\xc8\xa8\xa6\xc3\
\x7e\xc4\x4b\x75\x67\x22\xcf\x11\x78\xef\x5e\x87\x2a\x92\x4d\xed\
\xba\x1e\xbb\x44\x0b\x07\x02\xd7\x23\x5a\x76\xb1\x65\xaf\x41\x22\
\x68\x9f\x4f\x3c\xbb\xc2\x21\x24\x97\x4c\x7a\x06\x62\x08\xf2\x86\
\xb8\x16\xb5\x5b\x31\xfb\x4a\x4c\xd0\x84\x78\x0b\xcd\xb2\xf8\xcd\
\xec\xac\x8c\x43\x4e\xe6\x0c\x47\xb4\xeb\xfd\x80\xdd\x90\xf9\x7a\
\x47\xb6\xdd\x09\x5b\x8d\x4c\x43\x4b\x91\x51\xe4\x6d\x44\xd1\x9c\
\x4d\xfc\x50\x75\x55\xc8\x72\x4e\x75\x02\xd7\x06\x03\x11\x3b\x42\
\x3d\x32\x1a\xa5\x12\x17\x93\xec\x70\x57\x4c\xaa\x58\x13\xb8\x70\
\x7c\xad\x04\x7e\x88\xec\x00\xba\x98\x02\x16\x21\xc9\xaa\x52\xe7\
\x72\x7e\x14\xc9\xc6\xc6\x2d\xb5\x73\x81\x15\x48\xca\xd9\xf7\x70\
\xf3\xe2\xf3\xe5\x7d\x64\xeb\x3c\x15\x39\x9e\xf7\x66\xab\x65\x4d\
\x25\xaf\x62\xae\x21\x7f\x8a\x44\xac\x2c\x05\x8c\x40\x96\xa8\x3e\
\x5e\x7c\xbe\xbc\x86\x3e\x23\x88\x53\xd4\x60\x66\x33\x7f\x06\x19\
\x1e\x4f\xc1\xdc\x5f\x6d\x2d\x92\x4e\x26\xad\x18\x84\xc4\x03\x8e\
\xfb\xf2\xd6\x02\x37\x22\xb6\x84\x1b\xb2\xff\x1d\xf7\x59\x8f\x13\
\x20\x47\x70\x15\x72\xbe\x4f\x47\x6e\x3e\xdb\xe6\xf9\x1b\x86\x6c\
\x51\x9a\x54\x2c\x03\x5c\xe5\xbc\x26\x76\xa8\x42\xbc\x7b\xe3\xda\
\x1f\xd6\x20\x26\xf1\xfc\xa4\xd2\xbd\x90\xba\xc6\x4d\x36\xb1\x3e\
\xfb\x7b\x6f\xae\x5e\x77\x19\x90\x5a\x86\x68\xdf\xf9\xe8\x8f\x9d\
\x49\xf7\x5e\xd2\xa1\xf8\x8c\x24\xfe\x12\xf3\x7f\xc8\x0b\xd2\xe5\
\x39\xee\x86\x74\xb0\x65\x31\xcb\x79\x97\xe4\x56\x62\x91\xf8\x85\
\x01\x91\x75\xa8\x43\xbd\xec\x80\x5d\x12\x64\x5d\x7c\x5b\x97\xe8\
\x8c\xa4\xaf\x89\xb3\x51\xb3\x0a\xf8\x15\xf6\x2e\xed\x5d\xb3\xbf\
\x8b\x33\x22\x64\xb2\x7c\x9d\x44\x5c\x3b\x16\xb3\x8d\x9b\x7a\xf4\
\x61\xd2\x2a\x91\x80\xca\x26\x95\x3a\x3f\xe1\x7a\x98\x62\x08\xf6\
\x2e\x58\xb9\x0f\x60\x3c\xc5\x67\x36\xef\x89\x84\xa7\x35\xd5\x9d\
\x5a\xca\xdb\x24\x9c\x6f\x71\x5f\xc4\xd9\xc0\x94\xc0\x5c\xd4\x27\
\x7a\xce\xc4\xec\xab\xba\x21\xc9\x4a\x58\xe0\x42\xec\xcf\xfa\x6d\
\x41\xa6\xc7\xa4\x37\x69\xfa\x11\x6f\xd3\x69\x7d\xb6\x1e\x45\xa3\
\x0f\xf1\x0c\x1c\x4b\x28\x1c\x2c\x69\x04\x66\x5b\xa9\x53\xf1\x9f\
\xf7\xbe\x0a\x49\xe6\x60\x3b\xec\x3e\x8a\xfb\x30\xf0\x7b\x02\x8f\
\x60\x3f\x1d\xdd\x47\x11\x7a\x54\x15\xc5\xd9\xb5\xd7\x20\x53\x47\
\x0e\xb5\x98\xed\x02\xe6\xaf\x20\x7c\xa0\x27\x92\xb0\xc2\xa6\x7e\
\x0b\x81\xd1\x9e\x79\x1e\x86\xfd\xd4\xf4\x0a\x31\xfd\x25\x6d\x0f\
\x1f\x44\x0d\x8d\xe7\x23\x0d\xbc\xc0\xe0\xfe\xe5\x88\x0d\xdc\x27\
\x76\x46\x72\x01\x98\xd6\x69\x1d\xa2\x10\x87\xf2\xb5\xaf\x42\xdc\
\xbd\x6d\xf4\x83\xf9\xb4\x5e\x82\x2a\xf1\x53\x8b\x87\x9b\xc8\x22\
\x83\x7b\x36\xe2\xdf\xc2\xd5\x07\x69\x1c\xd3\x7a\xcc\x40\xbd\x4d\
\xed\x13\x7b\x60\xe7\x32\x3e\x1f\xa9\xaf\x16\x75\xe8\xe7\xe9\x4d\
\xc8\x5e\xc0\x33\x16\x04\x74\x72\x6e\xac\x66\x88\x8f\x1a\x24\xbd\
\xbb\x09\xb7\xcd\xc8\x57\x17\x3c\xed\x7a\x1e\x3a\x00\x63\x31\x57\
\x12\xe7\x21\xf5\x56\xc2\xe4\x84\xce\x05\xd9\x7b\x3b\x22\x6b\xcf\
\x62\x5f\xfe\xb5\x31\x1b\x20\x2e\x2a\x31\xb3\x68\x36\x23\x0a\x6d\
\x50\xdb\xbb\x01\x0e\xc4\x5c\x59\x7f\x11\xc5\xf4\xb5\x1f\x7a\x4d\
\xf3\xce\x02\xbf\xbb\xcc\xe0\x77\x51\x32\x19\xff\x5f\xd6\x8d\x86\
\xdc\x62\x2b\x50\x01\xd0\x0b\xe1\x6b\x52\xaf\xc8\x25\xf6\xd5\x9a\
\x1f\xce\x24\xfa\x7c\xf9\xe9\xd8\x7b\xcb\xbe\x89\x7f\xcf\xd6\x53\
\x0d\xb9\x3d\x49\xca\x4f\xd4\x16\x40\x35\xe6\xae\x69\xad\xdc\xc9\
\x2b\x90\xa5\xd0\x91\x11\x0f\x5f\x83\x6c\x03\xab\x0e\x1f\x8c\xcc\
\x12\xd0\xce\x33\x88\xdd\xfb\x60\x64\x88\xf5\x85\x1e\xc8\x6a\x44\
\x17\x8b\xb0\x11\x78\x0a\xb3\x8c\x62\x69\x43\x47\x24\x45\xaf\x6e\
\xfd\xff\x19\x5b\xb7\xf5\xbf\xc2\x52\xa2\x7b\xcc\x43\x06\x85\x77\
\xc1\x6c\x9d\xba\x9e\x30\xd1\xc1\x4d\x36\xb3\xda\x92\xb4\x3a\xb5\
\xa5\x8a\xd6\xa1\x8b\x72\x5d\x81\x78\xd1\xea\x0a\xcd\x10\x26\xa6\
\xee\xbe\xf8\x71\xdf\x2e\x25\x69\x42\xf4\xbe\xaf\xb0\x52\x71\xf3\
\x13\x9a\x06\xbe\xc2\xb0\xd0\xab\x34\xcf\x71\x85\x49\x86\xfc\xda\
\x9a\x6c\x13\xb8\x53\xa5\x45\xae\xa3\xf0\x3e\x3f\xc0\x71\x98\x7d\
\x5d\x0f\x13\xc6\xa7\x6d\x00\xfe\x4f\xf1\x94\x8a\x6c\xa6\x45\x9a\
\x1b\x5d\xa8\x96\xf9\xb4\xce\x00\x7e\x2a\x66\xae\x4d\xf5\x84\xd3\
\xaa\xaf\x34\xe0\xd7\x96\xe5\xca\x5c\x43\x1d\x66\x70\x73\x23\xb2\
\x9f\xff\x14\xe6\x6e\x5e\x4b\xb0\xb4\x45\x27\x8c\xb7\x22\x78\xb5\
\x8b\xc8\x5b\x20\x43\x73\x05\xb2\x4c\xb2\x09\xb0\xa0\xc3\x3a\xe0\
\x9b\xb9\x42\x02\x60\x10\x12\x45\x24\x0a\x6b\x10\xdd\xa7\xdc\xd1\
\x1b\xb5\xb3\xce\xee\xb9\x7f\x98\x44\xf0\x34\x95\x50\x1a\x7f\x4b\
\x9c\x8b\x9a\xe3\xd8\x70\xd4\xbc\x62\x2c\xea\x76\xd8\x26\x59\xc7\
\x44\xcd\xcd\xa6\x72\xb9\xd3\x2a\x99\x61\x02\x6a\x8e\xa9\x3d\x71\
\x9b\x30\x86\xa0\x6e\x87\x09\x2d\x6f\xde\x8e\xf8\x41\x8a\x72\x12\
\xca\xa5\x2b\x1f\xcf\x13\xcd\x31\x48\xac\xfc\x80\x50\xe9\x6c\xcf\
\xe7\xdf\xdc\x19\x09\x93\x66\xfb\xe2\xb7\x20\xd9\xba\xd2\x02\x55\
\x54\xf2\x97\x02\xf2\x0a\x01\xd5\x0e\xe8\x3b\x51\x3f\x3a\x01\x73\
\xa7\x89\xe9\x14\x1f\x22\x2d\x69\xa8\x42\xa8\x3f\x1c\x90\x57\x08\
\xfc\x0d\xc5\x68\x18\xb5\x47\xfc\x14\xe2\x9f\x3f\x0a\xd9\x64\x38\
\x94\xad\x47\x92\x36\x23\xee\x54\xf5\x48\xa4\x8c\xc8\x5e\x14\x10\
\xab\x90\x51\xa9\x10\x16\xf8\x24\x92\x02\x2c\x40\xf2\x2e\x17\xc2\
\xba\xff\x03\xc9\x8e\x5b\xd1\x0d\x4e\xbf\x25\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xff\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\xa1\x49\x44\x41\x54\x78\xda\xa4\
\x53\xcf\x2f\x03\x41\x14\xfe\x66\x76\x5a\x21\x25\x7e\x86\xc4\x45\
\xfc\x01\x2e\x22\x21\x71\xf0\x3f\x38\xb9\x22\x71\x74\x94\xe0\xe2\
\x50\x71\x27\x3d\x71\xe4\x22\x12\x47\x2e\xe2\x26\xe2\x52\x15\xa4\
\xc4\xa6\xf5\xa3\xaa\xb2\xa9\x52\xdb\xad\xee\xce\x7a\xb3\x2a\x5a\
\xa9\x64\xa5\x2f\xf9\xf6\x7d\x93\xf7\xbe\x37\x33\x6f\xf6\x31\xd7\
\x75\x51\x8f\x89\x93\x15\x56\x33\xc0\x19\x06\xc8\x8d\xd7\x08\xed\
\x48\x17\xb1\xef\x7d\x05\x61\x8a\xd0\xf7\x3b\x8b\x92\x30\x38\x27\
\x17\x73\x85\x02\xb2\xa6\x89\x69\xbd\x03\x86\x61\x62\xfd\x34\xc4\
\x29\x3c\x44\xd8\x50\x79\xec\x28\x8c\xf0\xf0\xec\xee\x3c\x1c\x07\
\xae\x2b\xe1\xca\x4a\x38\x5f\x5e\xc5\xca\x9c\x31\x86\xe8\xe6\xcc\
\x32\x69\x17\xbc\x13\x38\x12\x9a\x12\xcb\x8f\x62\x95\x50\x56\x88\
\x3d\x5e\xf6\x9a\x16\x80\xa7\x21\x0b\xd0\x97\xab\x45\xf5\xce\x7f\
\x8b\xbf\x4f\xa2\x34\x41\xe1\xf5\x09\xc2\xa6\x85\xb4\x6d\x18\xf1\
\x43\x14\xdf\x9e\x01\x55\x8c\x3a\xf4\x05\x49\x6b\xf2\xf2\x87\x73\
\xce\xd5\xce\xa3\xa4\x5d\xa2\x43\x24\x04\x15\x15\x56\x2e\x0d\x9b\
\x92\x7a\x46\x26\x7c\x3d\x5d\xef\xd8\xe4\xf0\x7b\x46\xef\xbe\xd8\
\x9e\x5f\x15\x36\x5d\x2b\x6f\x3c\x40\x6b\x6a\x83\x95\x49\xc3\xcf\
\x7f\xc1\x18\xc7\x73\xfc\xf8\xde\xb2\x71\xae\x0a\x88\x6c\x4a\x47\
\x57\xff\x20\x6c\x6a\xa4\x1f\xd3\x82\x0d\x48\xdf\x9c\xa5\x4d\x0b\
\x97\xa2\x24\x21\xf2\xb9\x57\xf4\xb0\x80\xef\x02\x4e\xc9\x7a\x4b\
\x25\xf5\x27\x8d\xe3\x5e\x50\xff\x02\x5a\x63\x3b\x6c\xcb\xa4\x4e\
\x4b\x5f\x05\x8c\x54\xfc\x31\x57\xc0\x35\x51\x57\x10\x69\x09\x35\
\x77\xd2\x93\x70\x70\x8d\xfb\xb8\x3f\xc3\x9d\x7e\xf5\x72\x76\x8b\
\xd8\xda\x1e\x3d\x63\x34\x81\xe4\xeb\x56\xe4\xe0\x3f\x03\xa4\x67\
\x70\x1d\xd9\x87\x4e\xb4\x55\x4d\x52\x50\x91\xf2\x5c\xf8\x35\x87\
\x60\x11\x8a\xac\xde\x71\xfe\x14\x60\x00\x0e\x52\x12\x9f\x1d\xc6\
\x49\x04\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xfb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\x9d\x49\x44\x41\x54\x78\xda\xa4\
\x93\xbf\x4b\xc3\x50\x10\xc7\x2f\x2f\x2f\x92\x0e\x45\x9d\xec\x14\
\xc4\x52\x5d\x5d\xea\xa2\x90\xbf\xc3\xc1\x41\xba\xe8\xe2\xe0\xa4\
\xe0\xe0\xe0\xe0\x54\x45\x1c\x44\x10\x07\x71\x71\x76\x73\x2a\xea\
\x28\x9d\x84\xa2\xed\x24\xa5\x0e\xa2\x85\xd6\x5a\x5e\x93\x9c\x77\
\xaf\x69\x4d\x51\xdb\x8a\x81\xcb\xcb\x8f\xfb\x7e\xde\xf7\xee\x12\
\x03\x11\xe1\x3f\x87\xcc\xc8\x7d\x5a\x8c\x15\x3a\x25\x6b\x5b\x6f\
\x8f\xc3\x88\xe2\x3b\xe3\x29\x5a\x4a\x00\x78\x24\xdb\xfb\x63\x6a\
\xe3\x2c\xb3\x3e\xbd\x18\x87\x41\x7e\x0c\x8a\x87\x99\x1a\xec\x2e\
\x9d\x64\xb5\x03\x84\x80\x57\xa1\x94\x02\x8f\xca\xf1\x7d\xbf\x2f\
\xc0\x34\x4d\xe0\x5c\xd2\x89\x10\xa0\xf7\x14\x41\x10\x80\x22\x71\
\xab\xd5\xea\x0b\xb0\x28\x38\x97\x74\x22\xea\xc0\xf4\x3c\x4f\x03\
\x9a\x03\x00\x28\x04\x70\x2e\xe9\x4c\x0d\x08\x50\x5b\x36\xd9\xba\
\x22\xf2\x20\x80\xb0\x2c\x5d\x26\xe9\x42\x40\xe8\x80\x1f\xbe\x57\
\x14\x8c\x4c\xd8\x7d\x01\x9c\xa3\x01\x1d\x07\xd1\x12\x8a\x17\x4f\
\x43\xcd\x3e\x16\x8b\x45\x4a\x80\xaf\x12\xf2\xf9\x3c\xfc\xf6\x61\
\x19\x86\xd1\xbd\x76\x5d\x97\xb6\xed\x2d\x41\xb2\x03\x1e\xd1\xfc\
\x5a\xf2\xdb\x24\x2c\xaa\xfb\xf6\xa0\x04\x8e\xe3\x40\xb9\x5c\xd6\
\x4d\x24\x9d\x8c\x3a\x90\xec\x40\x4a\x09\xf5\x7a\x13\x72\xd9\x7b\
\x2d\x9c\x5b\x9d\xea\x5a\xe6\x77\x7c\xd8\xb6\x1d\xf6\xc0\x6f\x03\
\x7c\xf0\xf4\x26\x82\xc6\xc3\x49\x8d\x86\x82\x44\x22\xa1\x93\xf9\
\x5a\x8f\x0e\xcd\x1e\x00\x7f\x07\xa4\xb3\xf4\x54\xae\xf1\x1c\x9e\
\xb1\x78\xb8\xb7\x79\x7c\xf5\x52\x79\xad\x2a\x85\x3a\x99\x03\xb1\
\x1d\x9d\x67\x51\x80\xc2\x8f\x38\x6b\xb9\x33\x0e\xc5\x58\x12\xd2\
\xe9\x49\x98\xcd\xe4\xe4\xe9\xcd\x4f\x4d\x74\xbd\xe5\x85\xe8\x7d\
\x15\x2a\x85\x3b\xb8\xdc\x66\xc0\x28\x83\x79\x12\x7f\xfc\x93\xb9\
\x79\xcd\x4f\x01\x06\x00\xba\x01\xda\x2a\x98\x34\xd6\x68\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\x73\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x01\x25\x49\x44\x41\x54\x38\x8d\x9d\
\xd2\xb1\x2b\x84\x71\x1c\xc7\xf1\x17\x27\xe9\xba\xd0\x25\x29\x92\
\xd1\x20\xc9\x2c\x03\x37\x59\xec\xb2\xdb\xfc\x0d\x4a\x31\xcb\x20\
\x83\x4d\x5d\x92\x4c\x06\xe3\x65\xc0\x60\x12\x9b\x0c\x32\xc9\x82\
\x43\x5d\xe7\x0c\xbf\x1f\x3d\x77\xee\x1e\xf2\x5e\x9e\xa7\xcf\xf3\
\x7c\x3f\xdf\xdf\xf7\xf3\xfb\x52\xcf\x04\x76\x70\x8b\x32\xee\xb0\
\x8f\x82\x5f\x68\xc7\x3a\x9e\xb1\x89\x19\x8c\x63\x0a\xab\x78\x44\
\x11\x5d\xad\x0c\xd6\x71\x83\x51\x2c\x62\x1b\xd3\x89\xef\xfd\x38\
\xc5\x61\xb3\xe2\xc9\x78\xdc\x51\x6c\x61\x03\x1d\x4d\xfe\xcb\xc7\
\x91\x16\x92\x62\x06\x6b\x38\x47\x15\x83\x58\xc6\x47\x13\x83\xb7\
\xd8\x68\x49\xc8\xe9\x9b\x1b\x21\xa4\x9d\x16\x9d\x93\x0c\xc4\x46\
\xd9\xa4\x58\xc6\x98\x3f\x24\x2d\x84\x5d\xc5\x50\x52\xbc\x57\x1f\
\x58\x1a\x7d\xa8\xa1\x3b\xe9\x78\x86\xd9\x3f\x1a\x14\x70\x8d\xa7\
\x46\xf1\x41\xb8\xaa\x34\x3a\x71\x29\x84\xfc\x83\xa2\x70\xcf\xf9\
\x94\xe2\x3d\x5c\xc4\xf7\x6f\x32\xf1\x79\x84\x39\xac\xe0\x55\xc8\
\xe5\x25\x1a\xce\x63\x17\x3d\x78\xc7\x88\x30\x76\xa5\xb1\x4b\xbb\
\xb0\x24\x5f\x3b\x51\x11\x02\xbb\x8a\xc7\xee\x14\x36\xb6\x86\x12\
\x72\x69\xf3\x66\x31\x2c\x91\x76\x42\x2f\x45\x93\x93\xdf\x4c\x5a\
\x91\x8b\xc5\x35\x1c\xb7\xfd\xc7\x21\x9a\x1c\xa0\xf7\x13\xe3\x6d\
\x3e\x8d\x53\xcc\x92\x00\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = b"\
\x00\x06\
\x06\xfa\x64\xc3\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x65\x00\x73\
\x00\x04\
\x00\x07\x35\xdf\
\x00\x6c\
\x00\x6f\x00\x67\x00\x6f\
\x00\x0f\
\x03\x33\xc9\x27\
\x00\x32\
\x00\x37\x00\x37\x00\x30\x00\x39\x00\x37\x00\x32\x00\x5f\x00\x70\x00\x6e\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x01\x3c\xbd\x87\
\x00\x66\
\x00\x6f\x00\x6c\x00\x64\x00\x65\x00\x72\x00\x2d\x00\x68\x00\x6f\x00\x72\x00\x69\x00\x7a\x00\x6f\x00\x6e\x00\x74\x00\x61\x00\x6c\
\x00\x2d\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x00\x9e\x5a\x27\
\x00\x64\
\x00\x69\x00\x73\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x09\xfb\x76\xc7\
\x00\x61\
\x00\x63\x00\x74\x00\x69\x00\x76\x00\x65\x00\x2d\x00\x73\x00\x65\x00\x61\x00\x72\x00\x63\x00\x68\x00\x2d\x00\x31\x00\x36\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x12\x0f\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x10\x0c\
\x00\x00\x00\x94\x00\x00\x00\x00\x00\x01\x00\x00\x14\x0e\
\x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x06\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x12\x0f\
\x00\x00\x01\x21\x2c\x82\xdb\xd0\
\x00\x00\x00\x44\x00\x00\x00\x00\x00\x01\x00\x00\x10\x0c\
\x00\x00\x01\x25\x17\x1f\x12\x50\
\x00\x00\x00\x94\x00\x00\x00\x00\x00\x01\x00\x00\x14\x0e\
\x00\x00\x01\x7d\x2a\x8d\x95\x3a\
\x00\x00\x00\x20\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x72\x13\x8c\xbd\xf2\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 59.050228
| 130
| 0.713927
|
8958d594db951fc45e4ca793233c828ba47886a2
| 2,276
|
py
|
Python
|
examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py
|
louis-she/ignite
|
d05a8939139e056e5c5daf842c81af0ab5b0caaf
|
[
"BSD-3-Clause"
] | 3
|
2021-12-15T17:08:20.000Z
|
2022-01-06T14:53:09.000Z
|
examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py
|
louis-she/ignite
|
d05a8939139e056e5c5daf842c81af0ab5b0caaf
|
[
"BSD-3-Clause"
] | null | null | null |
examples/contrib/cifar100_amp_benchmark/benchmark_fp32.py
|
louis-she/ignite
|
d05a8939139e056e5c5daf842c81af0ab5b0caaf
|
[
"BSD-3-Clause"
] | null | null | null |
import fire
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2
from utils import get_train_eval_loaders
from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss
def main(dataset_path, batch_size=256, max_epochs=10):
assert torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "NVIDIA/Apex:Amp requires cudnn backend to be enabled."
torch.backends.cudnn.benchmark = True
device = "cuda"
train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)
model = wide_resnet50_2(num_classes=100).to(device)
optimizer = SGD(model.parameters(), lr=0.01)
criterion = CrossEntropyLoss().to(device)
def train_step(engine, batch):
x = convert_tensor(batch[0], device, non_blocking=True)
y = convert_tensor(batch[1], device, non_blocking=True)
optimizer.zero_grad()
y_pred = model(x)
loss = criterion(y_pred, y)
loss.backward()
optimizer.step()
return loss.item()
trainer = Engine(train_step)
timer = Timer(average=True)
timer.attach(trainer, step=Events.EPOCH_COMPLETED)
ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})
metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def log_metrics(engine, title):
for name in metrics:
print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")
@trainer.on(Events.COMPLETED)
def run_validation(_):
print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
print("- Metrics:")
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
evaluator.run(eval_train_loader)
with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
evaluator.run(test_loader)
trainer.run(train_loader, max_epochs=max_epochs)
if __name__ == "__main__":
fire.Fire(main)
| 32.985507
| 110
| 0.714411
|
a7d0d8fa4c84549233033373bb5923c2b8203ef1
| 11,550
|
py
|
Python
|
Python/klampt/sim/batch.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
Python/klampt/sim/batch.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
Python/klampt/sim/batch.py
|
ipa-rmb-mr/Klampt
|
71793b54eead788811b4e62bcf8dadb49b68ff17
|
[
"BSD-3-Clause"
] | null | null | null |
from ..robotsim import *
from ..model import access
from simulation import SimpleSimulator
import time
def getWorldSimState(world):
"""Returns a dict containing a copy of all variables that are
simulated in the world. Can be used with setWorldSimState to save/
restore state.
NOTE: this does not perfectly save the state of a Simulator! To do that,
you must use the Simulator().getState()/saveState() methods.
"""
res = dict()
for i in range(world.numRigidObjects()):
res['rigidObjects['+str(i)+'].transform']=world.rigidObject(i).getTransform()
for i in range(world.numRobots()):
res['robots['+str(i)+'].config']=world.robot(i).getConfig()
res['robots['+str(i)+'].velocity']=world.robot(i).getVelocity()
return res
def setWorldSimState(world,state):
"""Sets the world state to the prior saved state (a dict from
getWorldSimState())
NOTE: this does not perfectly save simulation state! To do that,
you must use the Simulator().getState()/saveState() methods.
"""
for (k,v) in state.iteritems():
access.set_item(world,k,v)
return
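# Usage sketch (illustrative only; assumes a world with at least one robot): the
# save/restore pattern these two helpers provide. Nothing here is called by the module.
def _example_save_restore(world):
    saved = getWorldSimState(world)
    if world.numRobots() > 0:
        q = world.robot(0).getConfig()
        q[0] += 0.1                      # perturb something that is simulated
        world.robot(0).setConfig(q)
    setWorldSimState(world, saved)       # every captured item is rolled back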
def doSim(world,duration,initialCondition,
returnItems=None,trace=False,
simDt=0.01,simInit=None,simStep=None,simTerm=None):
"""Runs a simulation for a given initial condition of a world.
Args:
world (WorldModel): the world
duration (float): the maximum duration of simulation, in seconds
initialCondition (dict): a dictionary mapping named items to values.
Each named item is specified by a path as used by the access
module, e.g. 'robot[0].config[4]'. See the documentation for
access.get_item()/
access.set_item() for details.
Special items include 'args' which is a tuple provided to each
simInit, simStep, and simTerm call.
returnItems (list of strs, optional): a list of named items to return
in the final state of the simulation. By default returns
everything that is variable in the simulator (simulation time,
robot and rigid object configuration / velocity, robot commands,
robot sensors).
trace (bool, optional): if True, returns the entire trace of
the items specified in returnItems rather than just the final
state.
simDt (float, optional, default 0.01): the outer simulation loop
(usually corresponds to the control rate).
simInit (function, optional): a function f(sim) called on the simulator
after its initial conditions are set but before simulating. You may
configure the simulator with this function.
simStep (function, optional): a function f(sim) that is called on every
outer simulation loop (usually a controller function).
simTerm (function, optional): a function f(sim) that returns True if
the simulation should terminate early. Called on every outer
simulation loop.
Returns:
(dict): the final state of each returned item upon termination. The
dictionary maps named items (specified by the returnItems argument)
to their values. Additional returned items are:
* 'status', which gives the status string of the simulation
* 'time', which gives the time of the simulation, in s
* 'wall_clock_time', which gives the time elapsed while computing
the simulation, in s
"""
if returnItems == None:
#set up default return items
returnItems = []
for i in range(world.numRigidObjects()):
returnItems.append('rigidObjects['+str(i)+'].transform')
returnItems.append('rigidObjects['+str(i)+'].velocity')
for i in range(world.numRobots()):
returnItems.append('time')
returnItems.append('controllers['+str(i)+'].commandedConfig')
returnItems.append('controllers['+str(i)+'].commandedVelocity')
returnItems.append('controllers['+str(i)+'].sensedConfig')
returnItems.append('controllers['+str(i)+'].sensedVelocity')
returnItems.append('controllers['+str(i)+'].sensors')
returnItems.append('robots['+str(i)+'].actualConfig')
returnItems.append('robots['+str(i)+'].actualVelocity')
returnItems.append('robots['+str(i)+'].actualTorques')
initCond = getWorldSimState(world)
args = ()
for k,v in initialCondition.iteritems():
        if k != 'args':
access.set_item(world,k,v)
else:
args = v
sim = SimpleSimulator(world)
if simInit: simInit(sim,*args)
assert simDt > 0,"Time step must be positive"
res = dict()
if trace:
for k in returnItems:
res[k] = [access.get_item(sim,k)]
res['status'] = [sim.getStatusString()]
print "klampt.batch.doSim(): Running simulation for",duration,"s"
t0 = time.time()
t = 0
worst_status = 0
while t < duration:
if simTerm and simTerm(sim,*args)==True:
if not trace:
for k in returnItems:
res[k] = access.get_item(sim,k)
res['status']=sim.getStatusString(worst_status)
res['time']=t
res['wall_clock_time']=time.time()-t0
#restore initial world state
setWorldSimState(world,initCond)
print " Termination condition reached at",t,"s"
print " Computation time:",time.time()-t0
return res
if simStep: simStep(sim,*args)
sim.simulate(simDt)
worst_status = max(worst_status,sim.getStatus())
if trace:
for k in returnItems:
res[k].append(access.get_item(sim,k))
res['status'].append(sim.getStatusString())
res['time']=t
res['wall_clock_time']=time.time()-t0
t += simDt
if not trace:
#just get the terminal stats
for k in returnItems:
res[k] = access.get_item(sim,k)
res['status']=sim.getStatusString(worst_status)
res['time']=t
res['wall_clock_time']=time.time()-t0
print " Done."
print " Computation time:",time.time()-t0
#restore initial world state
setWorldSimState(world,initCond)
return res
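# Usage sketch (illustrative only; the duration, time step, and item paths below are
# placeholders, not values from the original source):
def _example_doSim(world):
    initialCondition = {'robots[0].config': world.robot(0).getConfig()}
    res = doSim(world, 5.0, initialCondition,
                returnItems=['robots[0].actualConfig'], trace=False, simDt=0.02)
    print "doSim status:", res['status'], "at t =", res['time']
    return res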
def batchSim(world,duration,initialConditions,returnItems,
simDt=0.01,simInit=None,simStep=None,simTerm=None):
"""Given a world, a simulation duration, and a list of initial conditions,
runs simulations for all initial conditions.
Args:
world,duration,returnItems,simDt,simInit,simStep,simTerm: the same as
in doSim()
initialConditions (dict or list): either a dict mapping named items to
lists of initial values, or a list of initial state dictionaries.
In the former case, all entries must be of the same length.
Returns:
(list): all return values from doSim(). See the :func:`doSim`
documentation for more information on the arguments.
"""
res = []
if isinstance(initialConditions,dict):
#assume it is a dict-of-lists type
        v0 = initialConditions.itervalues().next()
for (k,v) in initialConditions.iteritems():
assert len(v)==len(v0),"initialConditions entries must all be of same length"
print "klampt.batch.batchSim(): Running",len(v0),"simulations..."
for i in xrange(len(v0)):
initCond = dict((k,v[i]) for (k,v) in initialConditions.iteritems())
try:
simRes = doSim(world,duration,initCond,returnItems,trace=False,
simDt=simDt,simInit=simInit,simStep=simStep,simTerm=simTerm)
except Exception:
print " Exception thrown on trial",i
simRes = 'error'
res.append(simRes)
else:
print "klampt.batch.batchSim(): Running",len(initialConditions),"simulations..."
for i,initCond in enumerate(initialConditions):
try:
simRes = doSim(world,duration,initCond,returnItems,trace=False,
simDt=simDt,simInit=simInit,simStep=simStep,simTerm=simTerm)
except Exception:
print " Exception thrown on trial",i
simRes = 'error'
res.append(simRes)
return res
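# Input sketch (illustrative only; q_a and q_b stand for two robot configurations):
# the two initialConditions layouts accepted above describe the same pair of trials.
def _example_batchSim_inputs(q_a, q_b):
    dict_of_lists = {'robots[0].config': [q_a, q_b]}
    list_of_dicts = [{'robots[0].config': q_a}, {'robots[0].config': q_b}]
    return dict_of_lists, list_of_dicts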
def monteCarloSim(world,duration,initialConditionSamplers,N,returnItems,
simDt=0.01,simInit=None,simStep=None,simTerm=None):
"""Given a world, a simulation duration, and dict of sampling functions
for world items, runs N monte-carlo simulations.
Args:
world, duration, returnItems, simDt, simInit, simStep, simTerm:
same as for doSim()
initialConditionSamplers (dict of functions): a dict mapping named
world items to sampling functions that take no arguments (i.e.,
sample()).
N (int): the number of Monte Carlo samples.
Returns:
list: contains N pairs (initCond,returnVal) containing each simulation
result:
* initCond: the sampled initial condition
* returnVal: the return value from doSim().
"""
print "klampt.batch.monteCarloSim(): Running",N,"simulations..."
res = []
for sample in xrange(N):
initCond = dict((k,v()) for k,v in initialConditionSamplers.iteritems())
try:
simRes = doSim(world,duration,initCond,returnItems,trace=False,
simDt=simDt,simInit=simInit,simStep=simStep,simTerm=simTerm)
except Exception as e:
print " Exception thrown on trial",sample
print " what:",e
import traceback
traceback.print_exc()
simRes = 'error'
res.append((initCond,simRes))
return res
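# Sampler sketch (illustrative only; the item path and drop-height range are
# placeholders): each value passed to monteCarloSim must be a zero-argument callable.
def _example_samplers():
    import random
    def sample_drop_transform():
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]           # identity rotation, column-major
        t = [0.0, 0.0, random.uniform(0.5, 1.0)]  # random drop height
        return (R, t)
    return {'rigidObjects[0].transform': sample_drop_transform}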
def saveStateHeaderCSV(state,f):
"""Given a state dictionary, saves the header CSV format to the given
output stream f"""
vflat = [access.flatten(state[k]) for k in state]
#write header
itemNames = []
for k,v in zip(state.keys(),vflat):
if len(v)==1:
itemNames.append(k)
else:
for i in range(len(v)):
itemNames.append(k+'['+str(i)+']')
    f.write(','.join(itemNames))
f.write('\n')
def saveStateCSV(state,f):
"""Given a state dictionary, saves it to CSV format to the given
output stream f"""
saveStateHeaderCSV(state,f)
f.write(','.join(str(v) for v in access.flatten(state)))
f.write('\n')
def saveStatesCSV(states,f):
"""Given list of state dictionaries, saves them to CSV format to the
given output stream f"""
saveStateHeaderCSV(states[0],f)
for state in states:
f.write(','.join(str(v) for v in access.flatten(state)))
f.write('\n')
return
def saveStateTrajectoryCSV(stateTraj,f):
"""Given a state trajectory (dict mapping keys to lists), saves it
to CSV format to the given output stream f."""
state0 = dict((k,v[0]) for (k,v) in stateTraj.iteritems())
state0['iter'] = 0
saveStateHeaderCSV(state0,f)
if len(stateTraj.items())==0:
return
length = len(stateTraj.values()[0])
for i in xrange(length):
state0['iter'] = i
for k in stateTraj.iterkeys():
state0[k] = stateTraj[k][i]
f.write(','.join(str(v) for v in access.flatten(state0)))
f.write('\n')
return
| 41.103203
| 91
| 0.618009
|
8afca7c1f9e3c2a76f6cbb6b19d1b01c72783652
| 4,114
|
py
|
Python
|
ct/py/csv_comparer_test.py
|
isabella232/skia-buildbot
|
6bfdd3e57760c114fdd6b207a4a254e01c0579be
|
[
"BSD-3-Clause"
] | 119
|
2015-01-09T20:49:54.000Z
|
2022-02-20T03:03:54.000Z
|
ct/py/csv_comparer_test.py
|
isabella232/skia-buildbot
|
6bfdd3e57760c114fdd6b207a4a254e01c0579be
|
[
"BSD-3-Clause"
] | 74
|
2018-06-22T09:57:11.000Z
|
2022-03-28T14:10:25.000Z
|
ct/py/csv_comparer_test.py
|
isabella232/skia-buildbot
|
6bfdd3e57760c114fdd6b207a4a254e01c0579be
|
[
"BSD-3-Clause"
] | 55
|
2015-01-23T13:45:32.000Z
|
2022-02-20T03:11:46.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for module csv_merger."""
import datetime
import filecmp
import os
import shutil
import tempfile
import unittest
import csv_comparer
class TestCsvComparer(unittest.TestCase):
def setUp(self):
self._test_csv_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_data', 'csv_comparer')
self._actual_output_dir = tempfile.mkdtemp()
# Set mocks.
class _MockUtcNow(object):
@staticmethod
def strftime(format_str):
self.assertEqual('%Y-%m-%d %H:%M UTC', format_str)
return '2014-05-19 16:50 UTC'
class _MockDatetime(object):
@staticmethod
def utcnow():
return _MockUtcNow()
self._original_datetime = datetime.datetime
datetime.datetime = _MockDatetime
def tearDown(self):
shutil.rmtree(self._actual_output_dir)
datetime.datetime = self._original_datetime
def _AssertHTMLFiles(self, sub_dir, additional_files=()):
# Ensure that the two html files we care about are as expected.
for html_file in ('index.html', 'fieldname1.html') + additional_files:
self.assertTrue(
filecmp.cmp(os.path.join(self._test_csv_dir, sub_dir, html_file),
os.path.join(self._actual_output_dir, html_file)))
def test_E2EComparerWithDiscardOutliers(self):
comparer = csv_comparer.CsvComparer(
csv_file1=os.path.join(self._test_csv_dir, 'comparer_csv1.csv'),
csv_file2=os.path.join(self._test_csv_dir, 'comparer_csv2.csv'),
output_html_dir=self._actual_output_dir,
requester_email='superman@krypton.com',
chromium_patch_link='http://chromium-patch.com',
skia_patch_link='http://skia-patch.com',
raw_csv_nopatch='http://raw-csv-nopatch.com',
raw_csv_withpatch='http://raw-csv-withpatch.com',
variance_threshold=10,
absolute_url='',
min_pages_in_each_field=1,
discard_outliers=12.5,
num_repeated=3,
target_platform='Android',
crashed_instances='build1-b5 build10-b5',
missing_devices='build99-b5 build100-b5',
browser_args_nopatch='--test=1',
browser_args_withpatch='--test=2',
pageset_type='Mobile10k',
chromium_hash='abcdefg1234567',
skia_hash='tuvwxyz1234567',
missing_output_workers='1 3 100',
logs_link_prefix=('https://chrome-swarming.appspot.com/tasklist?'
'l=500&f=runid:testing&f=name:perf_task_'),
description='E2EComparerWithDiscardOutliers',
total_archives='',
)
comparer.Compare()
self._AssertHTMLFiles('discard_outliers')
def test_E2EComparerWithNoDiscardOutliers(self):
comparer = csv_comparer.CsvComparer(
csv_file1=os.path.join(self._test_csv_dir, 'comparer_csv1.csv'),
csv_file2=os.path.join(self._test_csv_dir, 'comparer_csv2.csv'),
output_html_dir=self._actual_output_dir,
requester_email='superman@krypton.com',
chromium_patch_link='http://chromium-patch.com',
skia_patch_link='http://skia-patch.com',
raw_csv_nopatch='http://raw-csv-nopatch.com',
raw_csv_withpatch='http://raw-csv-withpatch.com',
variance_threshold=0,
absolute_url='',
min_pages_in_each_field=0,
discard_outliers=0,
num_repeated=3,
target_platform='Linux',
crashed_instances='',
missing_devices='',
browser_args_nopatch='',
browser_args_withpatch='',
pageset_type='10k',
chromium_hash='abcdefg1234567',
skia_hash='tuvwxyz1234567',
missing_output_workers='',
logs_link_prefix='',
description='E2EComparerWithNoDiscardOutliers',
total_archives='10',
)
comparer.Compare()
self._AssertHTMLFiles('keep_outliers',
('fieldname2.html', 'fieldname3.html'))
if __name__ == '__main__':
unittest.main()
| 34.864407
| 75
| 0.672095
|
eb3e60bcdc28f7c2fd2c12f6c0f31cfdccd30aa1
| 5,491
|
py
|
Python
|
send.py
|
jlinoff/simple-client-server
|
b5a9cf05f140f34df47aefead4981455c04a7e83
|
[
"MIT"
] | 4
|
2016-11-03T17:08:46.000Z
|
2021-09-21T13:57:32.000Z
|
send.py
|
jlinoff/simple-client-server
|
b5a9cf05f140f34df47aefead4981455c04a7e83
|
[
"MIT"
] | null | null | null |
send.py
|
jlinoff/simple-client-server
|
b5a9cf05f140f34df47aefead4981455c04a7e83
|
[
"MIT"
] | 2
|
2018-03-28T12:43:22.000Z
|
2021-09-21T13:57:33.000Z
|
#!/usr/bin/env python
'''
Simple sender.
Sends messages over a specific port to a host at periodic intervals.
Here is an example that sends messages to recv_host on port 8500.
$ firewall-cmd --zone=public --add-port=8500/tcp
$ send.py --host recv_host --port 8500
Here is what you would run on the recv_host:
$ firewall-cmd --zone=public --add-port=8500/tcp
$ recv.py --host 0.0.0.0 --port 8500
'''
import argparse
import datetime
import inspect
import json
import os
import random
import socket
import string
import sys
import time
VERSION = '1.0.1'
def infov(opts, msg, lev=1):
'''
Print a verbose message.
'''
if opts.verbose > 0:
print('INFO:{} {}'.format(inspect.stack()[lev][2], msg))
def getopts():
'''
Process the command line arguments.
'''
# Trick to capitalize the built-in headers.
# Unfortunately I can't get rid of the ":" reliably.
def gettext(s):
lookup = {
'usage: ': 'USAGE:',
'positional arguments': 'POSITIONAL ARGUMENTS',
'optional arguments': 'OPTIONAL ARGUMENTS',
'show this help message and exit': 'Show this help message and exit.\n ',
}
return lookup.get(s, s)
argparse._ = gettext # to capitalize help headers
base = os.path.basename(sys.argv[0])
name = os.path.splitext(base)[0]
usage = '\n {0} [OPTIONS] <DOT_FILE>'.format(base)
desc = 'DESCRIPTION:{0}'.format('\n '.join(__doc__.split('\n')))
epilog = r'''EXAMPLES:
# Example 1: help
$ {0} -h
# Example 2: simple send
$ {0}
# Example 3: send at 2 second intervals.
$ {0} -t 2
# Example 4: increase the send packet size.
# you would probably want to increase the size for the
# receiver as well.
$ {0} -s 4096
# Example 5: specify the host and port explicitly.
$ {0} -H other_host -p 8601
'''.format(base)
afc = argparse.RawTextHelpFormatter
parser = argparse.ArgumentParser(formatter_class=afc,
description=desc[:-2],
usage=usage,
epilog=epilog)
parser.add_argument('-H', '--host',
action='store',
type=str,
default='127.0.0.1',
metavar=('HOST'),
help='''The host.
Default %(default)s.
''')
parser.add_argument('-p', '--port',
action='store',
type=int,
default=8500,
metavar=('PORT'),
help='''The port.
Default %(default)s.
''')
parser.add_argument('-q', '--quiet',
action='store_true',
help='''Don't display the messages as they are received.
''')
parser.add_argument('-s', '--size',
action='store',
type=int,
default=32,
metavar=('SIZE'),
help='''Send packet data size.
Default %(default)s.
''')
parser.add_argument('-r', '--rsize',
action='store',
type=int,
default=1024,
metavar=('SIZE'),
help='''The response packet data size.
Default %(default)s.
''')
parser.add_argument('-t', '--time',
action='store',
type=float,
default=1,
metavar=('SECONDS'),
help='''The time to pause between send operations.
Default %(default)s.
''')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='''Increase the level of verbosity.
''')
parser.add_argument('-V', '--version',
action='version',
version='%(prog)s version {0}'.format(VERSION),
help="""Show program's version number and exit.
""")
opts = parser.parse_args()
return opts
def create_record(opts):
'''
Create a record to send.
'''
infov(opts, 'create record with {} bytes of data'.format(opts.size))
data = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(opts.size))
rec = {'data': data, 'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'),}
json_rec = json.dumps(rec)
return json_rec.encode('ascii')
def send(opts, rec):
'''
Send the record.
'''
infov(opts, 'sending {}'.format(rec))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = (opts.host, opts.port)
if not opts.quiet:
print('SND: {} {}'.format(addr, rec))
try:
sock.connect(addr)
sock.sendall(bytes(rec))
response = sock.recv(opts.rsize)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
except socket.error: # need to change this for Python 3
pass # we don't care if there is no listener
def main():
'''
Main send loop.
'''
opts = getopts()
try:
while True:
rec = create_record(opts)
send(opts, rec)
time.sleep(opts.time)
except KeyboardInterrupt:
print('')
infov(opts, 'done')
if __name__ == '__main__':
main()
| 27.873096
| 99
| 0.516482
|
3729ed86845f9e46358ae63361144b6e9a0770c3
| 781
|
py
|
Python
|
foaflib/classes/onlineaccount.py
|
lmaurits/foaflib
|
d194357ba0631c03d581fb84522107f2893cc4d0
|
[
"BSD-3-Clause"
] | null | null | null |
foaflib/classes/onlineaccount.py
|
lmaurits/foaflib
|
d194357ba0631c03d581fb84522107f2893cc4d0
|
[
"BSD-3-Clause"
] | null | null | null |
foaflib/classes/onlineaccount.py
|
lmaurits/foaflib
|
d194357ba0631c03d581fb84522107f2893cc4d0
|
[
"BSD-3-Clause"
] | null | null | null |
import rdflib
class OnlineAccount(object):
def __init__(self, graph=None, node=None):
self.accountServiceHomepage = ""
self.accountName = ""
self.accountProfilePage = ""
if graph and node:
for homepage in graph.objects(subject=node, predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/accountServiceHomepage')):
self.accountServiceHomepage = unicode(homepage)
for name in graph.objects(subject=node, predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/accountName')):
self.accountName = unicode(name)
for profilepage in graph.objects(subject=node, predicate=rdflib.URIRef('http://xmlns.com/foaf/0.1/accountProfilePage')):
self.accountProfilePage = unicode(profilepage)
| 45.941176
| 133
| 0.674776
|
5aad5b1d97ebace9a79cdbbcf5bbbcf325d4cb04
| 7,806
|
py
|
Python
|
lightcone_resample/util_scripts/plot_redshift_color_diagnostics.py
|
ArgonneCPAC/skysim
|
f271debe3439efd1ae5230c6020b2dbc5f79d824
|
[
"BSD-2-Clause"
] | 4
|
2020-08-08T10:01:49.000Z
|
2022-02-27T07:21:00.000Z
|
lightcone_resample/util_scripts/plot_redshift_color_diagnostics.py
|
ArgonneCPAC/skysim
|
f271debe3439efd1ae5230c6020b2dbc5f79d824
|
[
"BSD-2-Clause"
] | 67
|
2018-07-16T22:12:16.000Z
|
2020-07-02T01:12:48.000Z
|
lightcone_resample/util_scripts/plot_redshift_color_diagnostics.py
|
aphearin/cosmodc2
|
5bc2abebd7123f29b424efc11c3ef374a51cd6c1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from matplotlib import cm
import dtk
import h5py
import sys
import time
from scipy import stats
def get_hfiles(fname, healpix_pixels):
if len(healpix_pixels) == 0:
healpix_pixels = ['']
hfiles =[]
print(fname)
for healpix_pixel in healpix_pixels:
print(healpix_pixel)
if "#z_range#" in fname:
for z_range in ["0_1", "1_2", "2_3"]:
ffname = fname.replace('#healpix#',str(healpix_pixel)).replace("#z_range#", z_range)
hfiles.append(h5py.File(ffname, 'r'))
else:
hfiles.append(h5py.File(fname.replace('#healpix#',str(healpix_pixel)),'r'))
return hfiles
def get_val(hfiles, var_name, remove_nan=None):
sub_result = []
for hfile in hfiles:
for key in hfile.keys():
if key != "metaData":
# print(hfile[key].keys())
sub_result.append(hfile[key][var_name].value)
result = np.concatenate(sub_result)
if remove_nan is not None:
result[~np.isfinite(result)]=remove_nan
return result
def get_selection(hfiles, title, central_cut=False,
Mr_cut=None, mr_cut = None,
mass_cut=None, rs_cut=False,
synthetic=None, ms_cut =None,
synthetic_type = None):
redshift = get_val(hfiles,'redshift')
slct = (redshift == redshift)
if central_cut:
central = get_val(hfiles, 'isCentral')
slct = slct & (central == 1)
title=title+', central galaxies'
title=title+'\n'
if mass_cut is not None:
host_mass = get_val(hfiles, 'hostHaloMass')
if isinstance(mass_cut, (list,)):
slct = slct & (mass_cut[0] < host_mass) & (host_mass < mass_cut[1])
title = title+'{:.0e} < M_halo < {:.0e}'.format(mass_cut[0],mass_cut[1])
else:
slct = slct & (mass_cut < host_mass)
title = title+'M_halo > {:.0e}'.format(mass_cut)
if Mr_cut is not None:
Mr = get_mag(hfiles, 'SDSS', 'rest', 'r')
if isinstance(Mr_cut, (list,)):
slct = slct & (Mr_cut[0] < Mr) & (Mr < Mr_cut[1])
title = title+' {:.1f} < Mr < {:.1f}'.format(Mr_cut[0],Mr_cut[1])
else:
slct = slct & (Mr < Mr_cut)
title = title+' Mr < {:.1f}'.format(Mr_cut)
if mr_cut is not None:
mr = get_mag(hfiles, 'SDSS', 'obs', 'r')
if isinstance(mr_cut, (list,)):
slct = slct & (mr_cut[0] < mr) & (mr < mr_cut[1])
title = title+' {:.1f} < mr < {:.1f}'.format(mr_cut[0],mr_cut[1])
else:
slct = slct & (mr < mr_cut)
title = title+' mr < {:.1f}'.format(mr_cut)
if rs_cut:
a = get_val(hfiles,'baseDC2/is_on_red_sequence_gr')
b = get_val(hfiles,'baseDC2/is_on_red_sequence_ri')
print(a)
slct = slct & (a & b)
title = title+', Red Seq.'
if synthetic is not None:
halo_id = get_val(hfiles,'halo_id')
slct = slct & ((halo_id < 0) == synthetic)
title = title +'Synth.'
if synthetic_type is not None:
halo_id = get_val(hfiles,'baseDC2/halo_id')
slct = slct & (halo_id == synthetic_type)
title = title +'halo_id == {}, '.format(synthetic_type)
if ms_cut is not None:
stellar_mass = get_val(hfiles,'totalMassStellar')
slct = slct & ( stellar_mass > ms_cut)
title = title + "M* > {:.2e}".format(ms_cut)
return slct, title
def plot_color_redshift_baseDC2_diagnostics(fname):
hfile = h5py.File(fname,'r')
    magr = get_val([hfile], 'restframe_extincted_sdss_abs_magr')
    magg = get_val([hfile], 'restframe_extincted_sdss_abs_magg')
    redshift = get_val([hfile], 'redshift')
    htag = get_val([hfile], 'target_halo_fof_halo_id')
plt.figure()
h,xbins, ybins = np.histogram2d(redshift, magg-magr, bins=500)
plt.pcolor(xbins, ybins, h.T, cmap='Blues', norm=clr.LogNorm())
plt.ylabel('g-r')
plt.xlabel('redshift')
plt.colorbar(label='population density')
plt.figure()
plt.hist(redshift, bins=256, label='All Galaxies')
plt.legend(loc='best')
print("calcing")
unique, cnt = np.unique(htag, return_counts=True)
indx = np.argmax(cnt)
print(unique[indx])
plt.figure()
plt.hist(redshift[htag==0], bins=256, label='Synthetic Galaxies')
plt.legend(loc='best')
plt.figure()
plt.hist(redshift[magr<-19], bins=256, label='Mr < -19')
plt.legend(loc='best')
plt.figure()
plt.hist(redshift[magr>-19], bins=256, label = 'Mr > -19')
plt.hist(redshift[magr>-18], bins=256, label = 'Mr > -18')
plt.hist(redshift[magr>-17], bins=256, label = 'Mr > -17')
plt.hist(redshift[magr>-16], bins=256, label = 'Mr > -16')
plt.legend(loc='best')
plt.show()
print(hfile['487'].keys())
def plot_ra_dec(hfiles, mag_cut = None):
ra = get_val(hfiles, 'ra')
dec = get_val(hfiles, 'dec')
plt.figure()
plt.hist2d(ra,dec, bins=128)
plt.show()
def plot_redshift(hfiles, slct, title):
redshift = get_val(hfiles, 'redshift')
magr = get_val(hfiles, 'restframe_extincted_sdss_abs_magr')
magg = get_val(hfiles, 'restframe_extincted_sdss_abs_magg')
plt.figure()
plt.hist2d(redshift, magr, bins =256)
#plt.show()
def plot_redshift_distance(hfiles, title):
x = get_val(hfiles, 'x')
y = get_val(hfiles, 'y')
z = get_val(hfiles, 'z')
r = np.sqrt(x*x + y*y + z*z)
target_halo_x = get_val(hfiles, 'target_halo_x')
target_halo_y = get_val(hfiles, 'target_halo_y')
target_halo_z = get_val(hfiles, 'target_halo_z')
target_halo_r = np.sqrt(target_halo_x**2 + target_halo_y**2 + target_halo_z**2)
redshift = get_val(hfiles, 'target_halo_redshift')
redshift_raw = get_val(hfiles, 'redshift')
slct = (redshift < 2.5) & (r > 4230)
halo_id = get_val(hfiles, 'halo_id')
print('x', x[slct])
print('y', y[slct])
print('z', z[slct])
print('halo_id', halo_id[slct])
central = get_val(hfiles, 'upid')==-1
print('central', central[slct])
host_halo_mvir = get_val(hfiles, 'host_halo_mvir')
print('host_halo_mvir', host_halo_mvir[slct])
restframe_extincted_sdss_abs_magr = get_val(hfiles, 'restframe_extincted_sdss_abs_magr')
print('restframe_extincted_sdss_abs_magr', restframe_extincted_sdss_abs_magr[slct])
target_halo_fof_halo_id = get_val(hfiles, 'target_halo_fof_halo_id')
print('target_halo_fof_halo_id', target_halo_fof_halo_id[slct])
for num in target_halo_fof_halo_id[slct]:
print(num)
print('redshift', redshift[slct])
plt.figure()
plt.plot(r, target_halo_r, ',')
plt.figure()
plt.plot(redshift[slct], r[slct], '.', alpha=1.0)
plt.xlabel('redshift')
plt.ylabel('distance [Mpc/h]')
plt.title(title)
plt.figure()
plt.plot(redshift, r, ',', alpha=1.0)
plt.xlabel('redshift')
plt.ylabel('distance [Mpc/h]')
plt.title(title)
plt.figure()
plt.plot(redshift, redshift_raw, ',')
    plt.xlabel('redshift')
    plt.ylabel('redshift_raw')
indx = np.zeros(len(slct))
indx[slct] = 1.0
plt.figure()
plt.plot(indx, alpha=0.3)
indx = np.zeros(len(slct))
syn_cluster = halo_id == -1
indx[syn_cluster] = 1.0
plt.plot(indx)
if __name__ == "__main__":
fname = sys.argv[1]
healpix_pixels = sys.argv[2:]
hfiles = get_hfiles(fname, healpix_pixels)
# plot_ra_dec(hfiles)
#plot_redshift(hfiles)
slct, title = get_selection(hfiles, "")
plot_redshift_distance(hfiles, fname)
# plot_color_redshift_baseDC2_diagnostics(sys.argv[1])
plt.show()
| 32.661088
| 100
| 0.614399
|
770b7622edc0e033f94a22ed1113857ac09fa827
| 106
|
py
|
Python
|
config/test_run.py
|
ibalagurov/selenoid_workshop
|
39a3d9348e41fe508cbfa46954d2e3aecb05e638
|
[
"MIT"
] | 1
|
2020-08-24T07:41:29.000Z
|
2020-08-24T07:41:29.000Z
|
config/test_run.py
|
ibalagurov/selenoid_workshop
|
39a3d9348e41fe508cbfa46954d2e3aecb05e638
|
[
"MIT"
] | null | null | null |
config/test_run.py
|
ibalagurov/selenoid_workshop
|
39a3d9348e41fe508cbfa46954d2e3aecb05e638
|
[
"MIT"
] | null | null | null |
from config import env
ONE_SESSION = env.get_bool("ONE_SESSION", False)
GGR = env.get_bool("GGR", False)
| 21.2
| 48
| 0.745283
|
a750e5327687607e89115b278e34fb00907308e1
| 3,682
|
py
|
Python
|
netneurotools/civet.py
|
liuzhenqi77/netneurotools
|
fbdf9a3c0e4c5734dda336218553da50fae54267
|
[
"BSD-3-Clause"
] | 18
|
2019-08-01T00:15:17.000Z
|
2022-03-12T07:09:13.000Z
|
netneurotools/civet.py
|
liuzhenqi77/netneurotools
|
fbdf9a3c0e4c5734dda336218553da50fae54267
|
[
"BSD-3-Clause"
] | 100
|
2018-11-03T17:36:35.000Z
|
2021-12-11T13:21:20.000Z
|
netneurotools/civet.py
|
liuzhenqi77/netneurotools
|
fbdf9a3c0e4c5734dda336218553da50fae54267
|
[
"BSD-3-Clause"
] | 19
|
2017-10-24T14:44:31.000Z
|
2022-01-21T02:19:42.000Z
|
# -*- coding: utf-8 -*-
"""
Functions for working with CIVET data (ugh)
"""
import nibabel as nib
import numpy as np
from scipy.interpolate import griddata
from .datasets import fetch_civet, fetch_fsaverage
_MNI305to152 = np.array([[0.9975, -0.0073, 0.0176, -0.0429],
[0.0146, 1.0009, -0.0024, 1.5496],
[-0.0130, -0.0093, 0.9971, 1.1840],
[0.0000, 0.0000, 0.0000, 1.0000]])
def read_civet(fname):
"""
Reads a CIVET-style .obj geometry file
Parameters
----------
fname : str or os.PathLike
Filepath to .obj file
Returns
-------
vertices : (N, 3)
triangles : (T, 3)
"""
k, polygons = 0, []
with open(fname, 'r') as src:
n_vert = int(src.readline().split()[6])
vertices = np.zeros((n_vert, 3))
for i, line in enumerate(src):
if i < n_vert:
vertices[i] = [float(i) for i in line.split()]
elif i >= (2 * n_vert) + 5:
if not line.strip():
k = 1
elif k == 1:
polygons.extend([int(i) for i in line.split()])
triangles = np.reshape(np.asarray(polygons), (-1, 3))
return vertices, triangles
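# Usage sketch (the .obj path below is a placeholder for a CIVET surface file):
def _example_read_civet(obj_path='lh.mid.obj'):
    vertices, triangles = read_civet(obj_path)
    return vertices.shape, triangles.shape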
def civet_to_freesurfer(brainmap, surface='mid', version='v1',
freesurfer='fsaverage6', method='nearest',
data_dir=None):
"""
Projects `brainmap` in CIVET space to `freesurfer` fsaverage space
Uses a nearest-neighbor projection based on the geometry of the vertices
Parameters
----------
brainmap : array_like
CIVET brainmap to be converted to freesurfer space
surface : {'white', 'mid'}, optional
Which CIVET surface to use for geometry of `brainmap`. Default: 'mid'
version : {'v1', 'v2'}, optional
Which CIVET version to use for geometry of `brainmap`. Default: 'v1'
freesurfer : str, optional
Which version of FreeSurfer space to project data to. Must be one of
{'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5', 'fsaverage6'}.
Default: 'fsaverage6'
method : {'nearest', 'linear'}, optional
What method of interpolation to use when projecting the data between
surfaces. Default: 'nearest'
data_dir : str, optional
Path to use as data directory. If not specified, will check for
environmental variable 'NNT_DATA'; if that is not set, will use
`~/nnt-data` instead. Default: None
Returns
-------
data : np.ndarray
Provided `brainmap` mapped to FreeSurfer
"""
brainmap = np.asarray(brainmap)
densities = (81924, 327684)
n_vert = brainmap.shape[0]
if n_vert not in densities:
raise ValueError('Unable to interpret `brainmap` space; provided '
'array must have length in {}. Received: {}'
.format(densities, n_vert))
n_vert = n_vert // 2
icbm = fetch_civet(density='41k' if n_vert == 40962 else '164k',
version=version, data_dir=data_dir, verbose=0)[surface]
fsavg = fetch_fsaverage(version=freesurfer, data_dir=data_dir, verbose=0)
fsavg = fsavg['pial' if surface == 'mid' else surface]
data = []
for n, hemi in enumerate(('lh', 'rh')):
sl = slice(n_vert * n, n_vert * (n + 1))
vert_cv, _ = read_civet(getattr(icbm, hemi))
vert_fs = nib.affines.apply_affine(
_MNI305to152, nib.freesurfer.read_geometry(getattr(fsavg, hemi))[0]
)
data.append(griddata(vert_cv, brainmap[sl], vert_fs, method=method))
return np.hstack(data)
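# Usage sketch (illustrative only): project a random 41k-resolution CIVET map
# (81924 vertices across both hemispheres) onto fsaverage6. Calling this will
# fetch the required surface geometry via the fetchers imported above.
def _example_civet_to_freesurfer():
    civet_map = np.random.rand(81924)
    return civet_to_freesurfer(civet_map, surface='mid', version='v1',
                               freesurfer='fsaverage6', method='nearest')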
| 33.779817
| 79
| 0.58365
|
82cd40dde852df34836dae9c52391c6b167143c0
| 6,223
|
py
|
Python
|
jina/__init__.py
|
Virus2466/jina
|
9ca715bf73558c9a63aeb43205073a4404011a47
|
[
"Apache-2.0"
] | 1
|
2022-02-11T07:19:59.000Z
|
2022-02-11T07:19:59.000Z
|
jina/__init__.py
|
Sangwan5688/jina
|
ecd810543e19f91af80e91df11afb03ff96b1ec6
|
[
"Apache-2.0"
] | null | null | null |
jina/__init__.py
|
Sangwan5688/jina
|
ecd810543e19f91af80e91df11afb03ff96b1ec6
|
[
"Apache-2.0"
] | null | null | null |
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import datetime as _datetime
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import types as _types
import warnings as _warnings
if _sys.version_info < (3, 7, 0) or _sys.version_info >= (3, 10, 0):
raise OSError(f'Jina requires Python 3.7/3.8/3.9, but yours is {_sys.version_info}')
__windows__ = _sys.platform == 'win32'
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
_set_start_method(_start_method.lower())
_warnings.warn(f'multiprocessing start method is set to `{_start_method.lower()}`')
_os.environ.pop('JINA_MP_START_METHOD')
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '2.4.7'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.0.86'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS
# 1. clean this tuple,
# 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g"
# 3. copy all lines EXCEPT the first (which is the grep command in the last line)
__jina_env__ = (
'JINA_ARRAY_QUANT',
'JINA_CONTROL_PORT',
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DISABLE_UVLOOP',
'JINA_FULL_CLI',
'JINA_HUBBLE_REGISTRY',
'JINA_HUB_CACHE_DIR',
'JINA_HUB_ROOT',
'JINA_K8S_USE_TEST_PIP',
'JINA_LOG_CONFIG',
'JINA_LOG_ID',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_LOG_WORKSPACE',
'JINA_MP_START_METHOD',
'JINA_OPTIMIZER_TRIAL_WORKSPACE',
'JINA_POD_NAME',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_VCS_VERSION',
)
__default_host__ = _os.environ.get(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_endpoint__ = '/default'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
'groundtruths',
'groundtruths_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__root_dir__ = _os.path.dirname(_os.path.abspath(__file__))
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__root_dir__',
'__default_endpoint__',
'__default_executor__',
'__num_args_executor_func__',
'__unset_msg__',
'__windows__',
]
# ADD GLOBAL NAMESPACE VARIABLES
JINA_GLOBAL = _types.SimpleNamespace()
JINA_GLOBAL.scipy_installed = None
JINA_GLOBAL.tensorflow_installed = None
JINA_GLOBAL.torch_installed = None
JINA_GLOBAL.dgl_installed = None
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Client
from jina.clients import Client
# Document
from jina.types.document import Document
from jina.types.arrays.document import DocumentArray
from jina.types.arrays.memmap import DocumentArrayMemmap
# Executor
from jina.executors import BaseExecutor as Executor
from jina.executors.decorators import requests
# Flow
from jina.flow.base import Flow
from jina.flow.asyncio import AsyncFlow
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend(_names_with_underscore)
| 29.633333
| 122
| 0.712839
|
97a7757b74c82adad494657f789787c838407312
| 8,568
|
py
|
Python
|
demo_docs/source/conf.py
|
consiglionazionaledellericerche/docs-cnr-theme
|
8b041ffac388f737aa18aa2160b642c56cc6d898
|
[
"BSD-3-Clause"
] | null | null | null |
demo_docs/source/conf.py
|
consiglionazionaledellericerche/docs-cnr-theme
|
8b041ffac388f737aa18aa2160b642c56cc6d898
|
[
"BSD-3-Clause"
] | null | null | null |
demo_docs/source/conf.py
|
consiglionazionaledellericerche/docs-cnr-theme
|
8b041ffac388f737aa18aa2160b642c56cc6d898
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Sphinx RTD theme demo documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 3 11:56:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from os.path import abspath, join, dirname
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../docs_cnr_theme'))
# -- PROJECT Variables ------------------------------------------------
settings_project_name = 'Docs Italia Demo'
settings_copyright_copyleft = 'CC-BY 3.0 - Agenzia per l\'Italia Digitale'
settings_editor_name = "AgID - Agenzia per l'Italia Digitale"
settings_doc_version = 'bozza'
settings_doc_release = '1.0'
settings_doc_language = 'it'
settings_file_name = 'docs-italia-demo'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'docs_cnr_theme',
]
# Math
mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
numfig = True
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = settings_project_name
copyright = settings_copyright_copyleft
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = settings_doc_version
# The full version, including alpha/beta/rc tags.
release = settings_doc_release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = settings_doc_language
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- ReadTheDoc requirements and local template generation-----------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# override css_files to prevent injection of css files on rtd
if on_rtd:
html_context = {
'css_files': [
'_static/css/theme.css',
],
}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'docs_cnr_theme'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../.."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = settings_file_name + 'doc'
# -- Options for LaTeX output --------------------------------------------------
#latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
#}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', settings_file_name + '.tex', settings_project_name,
settings_copyright_copyleft, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', settings_file_name, settings_project_name,
[settings_editor_name], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', settings_file_name, settings_project_name,
settings_copyright_copyleft, settings_project_name, settings_project_name,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.970149
| 109
| 0.713819
|
21cd7e571d3c5d7a1c0ebed202ee0607108b4abc
| 3,967
|
py
|
Python
|
plugins/inline.py
|
fakeenemy01/ProMusicBot
|
276d7a658a07bb13acd66090a2cd0fa93303c0b1
|
[
"MIT"
] | 1
|
2021-08-18T05:37:42.000Z
|
2021-08-18T05:37:42.000Z
|
plugins/inline.py
|
fakeenemy01/ProMusicBot
|
276d7a658a07bb13acd66090a2cd0fa93303c0b1
|
[
"MIT"
] | null | null | null |
plugins/inline.py
|
fakeenemy01/ProMusicBot
|
276d7a658a07bb13acd66090a2cd0fa93303c0b1
|
[
"MIT"
] | null | null | null |
#MIT License
#Copyright (c) 2021 @Professor_Botz
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from pyrogram.handlers import InlineQueryHandler
from youtubesearchpython import VideosSearch
from utils import USERNAME
from pyrogram.types import InlineQueryResultArticle, InputTextMessageContent, InlineKeyboardButton, InlineKeyboardMarkup
from pyrogram import Client, errors
from config import Config
REPLY_MESSAGE=Config.REPLY_MESSAGE
buttons = [
[
InlineKeyboardButton('⚡️Make Own Bot', url='https://heroku.com/deploy?template=https://github.com/FakeEnemy01/ProMusicBot'),
InlineKeyboardButton('🧩 Source Code', url='https://github.com/FakeEnemy01/ProMusicBot'),
],
[
InlineKeyboardButton('🎧Play Music', url=f'https://t.me/{USERNAME}'),
InlineKeyboardButton('👨🏼🦯 Help', callback_data='help')
]
]
@Client.on_inline_query()
async def search(client, query):
answers = []
if query.query == "ORU_MANDAN_PM_VANNU":
answers.append(
InlineQueryResultArticle(
title="Deploy",
input_message_content=InputTextMessageContent(f"{REPLY_MESSAGE}\n\n<b>You can't use this bot in your group, for that you have to make your own bot from the [SOURCE CODE](https://github.com/FakeEnemy01/ProMusicBot) below.</b>", disable_web_page_preview=True),
reply_markup=InlineKeyboardMarkup(buttons)
)
)
await query.answer(results=answers, cache_time=0)
return
string = query.query.lower().strip().rstrip()
if string == "":
await client.answer_inline_query(
query.id,
results=answers,
switch_pm_text=("Search a youtube video"),
switch_pm_parameter="help",
cache_time=0
)
else:
videosSearch = VideosSearch(string.lower(), limit=50)
for v in videosSearch.result()["result"]:
answers.append(
InlineQueryResultArticle(
title=v["title"],
description=("Duration: {} Views: {}").format(
v["duration"],
v["viewCount"]["short"]
),
input_message_content=InputTextMessageContent(
"/play https://www.youtube.com/watch?v={}".format(
v["id"]
)
),
thumb_url=v["thumbnails"][0]["url"]
)
)
try:
await query.answer(
results=answers,
cache_time=0
)
except errors.QueryIdInvalid:
await query.answer(
results=answers,
cache_time=0,
switch_pm_text=("Nothing found"),
switch_pm_parameter="",
)
__handlers__ = [
[
InlineQueryHandler(
search
)
]
]
| 39.67
| 274
| 0.62667
|
ae747ea7dee7e580055ace55bd218d06c059917a
| 391
|
py
|
Python
|
imparh/candidatos/migrations/0003_auto_20201103_1946.py
|
alexandresillva/imparh
|
c1eb2e05376a76520ca7e254d73a3981bd6234b0
|
[
"MIT"
] | null | null | null |
imparh/candidatos/migrations/0003_auto_20201103_1946.py
|
alexandresillva/imparh
|
c1eb2e05376a76520ca7e254d73a3981bd6234b0
|
[
"MIT"
] | null | null | null |
imparh/candidatos/migrations/0003_auto_20201103_1946.py
|
alexandresillva/imparh
|
c1eb2e05376a76520ca7e254d73a3981bd6234b0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-11-03 22:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('candidatos', '0002_auto_20201103_1945'),
]
operations = [
migrations.RenameField(
model_name='candidato',
old_name='rg_orgao_emissor_2',
new_name='rg_orgao_emissor',
),
]
| 20.578947
| 50
| 0.613811
|
80c98f2e0778f03713cd05aedac004e65cad50b3
| 796
|
py
|
Python
|
scrapy/scrapy_crawler/pipelines.py
|
FedeGuastadisegni/PS-WB
|
2012e0bb70a63ca55d5956e2fdbb4c15bc2011d6
|
[
"MIT"
] | null | null | null |
scrapy/scrapy_crawler/pipelines.py
|
FedeGuastadisegni/PS-WB
|
2012e0bb70a63ca55d5956e2fdbb4c15bc2011d6
|
[
"MIT"
] | null | null | null |
scrapy/scrapy_crawler/pipelines.py
|
FedeGuastadisegni/PS-WB
|
2012e0bb70a63ca55d5956e2fdbb4c15bc2011d6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
class ScrapRapiPagoPipeline(object):
def process_item(self, item, spider):
item['address'] = self.cleanup_address(item['address'])
item.save()
return item
def cleanup_address(self, address):
m = re.search('(?P<numb>(\d+))\s(?P=numb)', address)
if m:
return address[0:m.end(1)]
return address
def __init__(self, stats, settings):
self.stats = stats
dispatcher.connect(self.save_crawl_stats,signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.stats,crawler.settings)
def save_crawl_stats(self):
record_crawl_stats(self.cur,self.stats,self.crawl_instance)
| 27.448276
| 68
| 0.677136
|
3bb6c55ed94c3f70a70699bdcbca0e9dc0dc9d55
| 4,589
|
py
|
Python
|
svm+pca.py
|
yzgrfsy/inceptionv3
|
35fcc9c61135f0c0e686a7137b5479063635180e
|
[
"MIT"
] | null | null | null |
svm+pca.py
|
yzgrfsy/inceptionv3
|
35fcc9c61135f0c0e686a7137b5479063635180e
|
[
"MIT"
] | null | null | null |
svm+pca.py
|
yzgrfsy/inceptionv3
|
35fcc9c61135f0c0e686a7137b5479063635180e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 02 15:51:14 2016
@author: JiaY
"""
from time import time
from PIL import Image
import glob
import numpy as np
import sys
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
# Force the interpreter to use UTF-8; for some reason the coding declaration at the top of the file is not enough.
# Even with this setting it still errors under IPython, so run this script with the plain Python interpreter.
# reload(sys)
# sys.setdefaultencoding("utf8")
# print sys.getdefaultencoding()
PICTURE_PATH = "/home/yuzhg/new/"
PICTURE_PATH = "/home/yuzhg/2/"  # overrides the path above; only this one is used
all_data_set = []  # raw data set: an n*m matrix, n samples with m attributes each
all_data_label = []  # class label for each sample
def get_picture():
label = 1
    # read every image and flatten it into a 1-D pixel list
while (label <= 1):
for name in glob.glob(PICTURE_PATH + str(label) + "/*.pgm"):
print(name)
img = Image.open(name)
#img.getdata()
#print(img.shape)
#np.array(img).reshape(1, 600*600)
all_data_set.append(list(img.getdata()))
all_data_label.append(label)
label += 1
get_picture()
print(all_data_label)
print(all_data_set)
#print(all_data_set.shape)
n_components = 16  # if the number of components kept after reduction is too large (e.g. 100), the results become extremely inaccurate. Why??
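# A possible explanation (an assumption, not verified here): with so few training
# images, a large n_components keeps many noisy principal components, so the SVM
# overfits the high-dimensional features and the cross-validated accuracy collapses.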
pca = PCA(n_components = n_components, svd_solver='auto',
whiten=True).fit(all_data_set)
# full data set after PCA dimensionality reduction
all_data_pca = pca.transform(all_data_set)
# X is the PCA-reduced data, y the corresponding class labels
X = np.array(all_data_pca)
y = np.array(all_data_label)
# Given a kernel name and a gamma value, return the 10-fold cross-validation accuracy of SVM training
def SVM(kernel_name, param):
    # compute the average accuracy with 10-fold cross-validation
    # n_splits folds, sampled randomly (shuffled)
kf = KFold(n_splits=10, shuffle = True)
precision_average = 0.0
    param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5]}  # grid search automatically finds the best C parameter
clf = GridSearchCV(SVC(kernel=kernel_name, class_weight='balanced', gamma = param),
param_grid)
for train, test in kf.split(X):
clf = clf.fit(X[train], y[train])
#print(clf.best_estimator_)
test_pred = clf.predict(X[test])
#print classification_report(y[test], test_pred)
        # compute the average accuracy
precision = 0
for i in range(0, len(y[test])):
if (y[test][i] == test_pred[i]):
precision = precision + 1
precision_average = precision_average + float(precision)/len(y[test])
precision_average = precision_average / 10
    print (u"Accuracy: " + str(precision_average))
return precision_average
t0 = time()
kernel_to_test = ['rbf', 'poly', 'sigmoid']
#print SVM(kernel_to_test[0], 0.1)
plt.figure(1)
for kernel_name in kernel_to_test:
x_label = np.linspace(0.0001, 1, 100)
y_label = []
for i in x_label:
y_label.append(SVM(kernel_name, i))
plt.plot(x_label, y_label, label=kernel_name)
print("done in %0.3fs" % (time() - t0))
# plt.xlabel("Gamma")
# plt.ylabel("Precision")
# plt.title('Different Kernels Contrust')
# plt.legend()
# plt.show()
"""
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42)
clf = clf.fit(X_train, y_train)
test_pred = clf.predict(X_test)
print classification_report(y_test, test_pred)
# compute the average accuracy with 10-fold cross-validation
precision_average = 0.0
for train, test in kf.split(X):
clf = clf.fit(X[train], y[train])
#print(clf.best_estimator_)
test_pred = clf.predict(X[test])
#print classification_report(y[test], test_pred)
    # compute the average accuracy
precision = 0
for i in range(0, len(y[test])):
if (y[test][i] == test_pred[i]):
precision = precision + 1
precision_average = precision_average + float(precision)/len(y[test])
precision_average = precision_average / 10
print ("Accuracy: " + str(precision_average))
print("done in %0.3fs" % (time() - t0))
"""
"""
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(all_data_pca, all_data_label)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
all_data_set_pred = clf.predict(all_data_pca)
#target_names = range(1, 11)
print(classification_report(all_data_set_pred, all_data_label))
"""
| 32.316901
| 88
| 0.650469
|
572554ccb0db3eff4b8237868c95cea0b35b3393
| 1,519
|
py
|
Python
|
src/manager/om/script/gspylib/inspection/items/database/CheckXid.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1
|
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/manager/om/script/gspylib/inspection/items/database/CheckXid.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/manager/om/script/gspylib/inspection/items/database/CheckXid.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
from gspylib.inspection.common import SharedFuncs
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
class CheckXid(BaseItem):
def __init__(self):
super(CheckXid, self).__init__(self.__class__.__name__)
def doCheck(self):
sqlcmd = "select txid_current();"
output = SharedFuncs.runSqlCmd(sqlcmd, self.user, "", self.port,
self.tmpPath, "postgres",
self.mpprcFile)
num = int(output)
self.result.raw = sqlcmd
self.result.val = "The xid value is %s" % output
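        # The thresholds below are taken as-is from this check; presumably they
        # are meant to warn well before the transaction ID space is exhausted:
        # up to 1e9 consumed xids is OK, up to 1.8e9 raises a warning, and
        # anything larger fails the check.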
if (num <= 1000000000):
self.result.rst = ResultStatus.OK
elif (num <= 1800000000):
self.result.rst = ResultStatus.WARNING
else:
self.result.rst = ResultStatus.NG
| 37.975
| 78
| 0.61817
|
2c7890cfe3bc2bcc27540bf03f769342a06bc5dc
| 117
|
py
|
Python
|
study/curso-em-video/exercises/008.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
study/curso-em-video/exercises/008.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
study/curso-em-video/exercises/008.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
n5 = float(input('Enter the length in meters: '))
print('The converted measurement is {}cm or {}mm'.format(n5*100, n5*1000))
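# Example run (the values follow directly from the two format arguments above):
# entering 1.5 prints "The converted measurement is 150.0cm or 1500.0mm".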
| 58.5
| 66
| 0.709402
|
395e39ff236ff7060319988a1a9a7a447460ef1e
| 47,983
|
py
|
Python
|
tests/io/test_dataset.py
|
ShreyashKad/sleap
|
32fec569d44ee727f4ec46e6bd94ccfb28398b83
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/io/test_dataset.py
|
ShreyashKad/sleap
|
32fec569d44ee727f4ec46e6bd94ccfb28398b83
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/io/test_dataset.py
|
ShreyashKad/sleap
|
32fec569d44ee727f4ec46e6bd94ccfb28398b83
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import os
import pytest
import numpy as np
from pathlib import Path
import sleap
from sleap.skeleton import Skeleton
from sleap.instance import Instance, Point, LabeledFrame, PredictedInstance, Track
from sleap.io.video import Video, MediaVideo
from sleap.io.dataset import Labels, load_file
from sleap.io.legacy import load_labels_json_old
from sleap.gui.suggestions import VideoFrameSuggestions, SuggestionFrame
TEST_H5_DATASET = "tests/data/hdf5_format_v1/training.scale=0.50,sigma=10.h5"
def _check_labels_match(expected_labels, other_labels, format="png"):
"""
    A utility function to check whether two sets of labels match.
    This doesn't directly compare some things (like video objects).
Args:
expected_labels: The expected labels
other_labels: The labels to check against expected
    Raises:
        AssertionError: if the two label sets do not match.
"""
# Check the top level objects
for x, y in zip(expected_labels.skeletons, other_labels.skeletons):
# Inline the skeleton matches check to see if we can get a better
# idea of why this test fails non-deterministically. The callstack
# doesn't go deeper than the method call in pytest for some reason.
        # The code below replaces a plain `assert x.matches(y)`; it reads oddly
        # because it was converted from Skeleton.__eq__.
self = x
other = y
# First check names, duh!
if other.name != self.name:
assert False
def dict_match(dict1, dict2):
return dict1 == dict2
# Check if the graphs are iso-morphic
import networkx as nx
is_isomorphic = nx.is_isomorphic(
self._graph, other._graph, node_match=dict_match
)
if not is_isomorphic:
assert False
# Now check that the nodes have the same labels and order. They can have
# different weights I guess?!
for node1, node2 in zip(self._graph.nodes, other._graph.nodes):
if node1.name != node2.name:
assert False
for x, y in zip(expected_labels.tracks, other_labels.tracks):
assert x.name == y.name and x.spawned_on == y.spawned_on
# Check that we have the same thing
for expected_label, label in zip(expected_labels.labels, other_labels.labels):
assert expected_label.frame_idx == label.frame_idx
frame_idx = label.frame_idx
frame_data = label.video.get_frame(frame_idx)[0:15, 0:15, :]
expected_frame_data = expected_label.video.get_frame(frame_idx)[0:15, 0:15, :]
# Compare the first frames of the videos, do it on a small sub-region to
# make the test reasonable in time.
        if format == "png":
assert np.allclose(frame_data, expected_frame_data)
# Compare the instances
assert all(
i1.matches(i2)
for (i1, i2) in zip(expected_label.instances, label.instances)
)
        # This test takes too long, so break after 20 frames or so.
if frame_idx > 20:
break
def test_labels_json(tmpdir, multi_skel_vid_labels):
json_file_path = os.path.join(tmpdir, "dataset.json")
if os.path.isfile(json_file_path):
os.remove(json_file_path)
# Save to json
Labels.save_json(labels=multi_skel_vid_labels, filename=json_file_path)
# Make sure the filename is there
assert os.path.isfile(json_file_path)
    # Let's load the labels back in and make sure we haven't lost anything.
loaded_labels = Labels.load_json(json_file_path)
# Check that we have the same thing
_check_labels_match(multi_skel_vid_labels, loaded_labels)
# Check that we don't have the very same objects
assert not multi_skel_vid_labels.skeletons[0] is loaded_labels.skeletons[0]
assert not multi_skel_vid_labels.nodes[3] in loaded_labels.nodes
assert not multi_skel_vid_labels.videos[0] is loaded_labels.videos[0]
# Reload json using objects from original labels
# We'll also test load_file() here
loaded_labels = Labels.load_file(json_file_path, match_to=multi_skel_vid_labels)
# Check that we now do have the same objects
assert multi_skel_vid_labels.skeletons[0] in loaded_labels.skeletons
assert multi_skel_vid_labels.nodes[3] in loaded_labels.nodes
assert multi_skel_vid_labels.videos[0] in loaded_labels.videos
def test_load_labels_json_old(tmpdir):
new_file_path = os.path.join(tmpdir, "centered_pair_v2.json")
# Function to run some checks on loaded labels
def check_labels(labels):
skel_node_names = [
"head",
"neck",
"thorax",
"abdomen",
"wingL",
"wingR",
"forelegL1",
"forelegL2",
"forelegL3",
"forelegR1",
"forelegR2",
"forelegR3",
"midlegL1",
"midlegL2",
"midlegL3",
"midlegR1",
"midlegR2",
"midlegR3",
"hindlegL1",
"hindlegL2",
"hindlegL3",
"hindlegR1",
"hindlegR2",
"hindlegR3",
]
# Do some basic checks
assert len(labels) == 70
# Make sure we only create one video object and it works
assert len({label.video for label in labels}) == 1
assert labels[0].video.get_frame(0).shape == (384, 384, 1)
# Check some frame objects.
assert labels[0].frame_idx == 0
assert labels[40].frame_idx == 573
# Check the skeleton
assert labels[0].instances[0].skeleton.node_names == skel_node_names
labels = Labels.load_json("tests/data/json_format_v1/centered_pair.json")
check_labels(labels)
# Save out to new JSON format
Labels.save_json(labels, new_file_path)
# Reload and check again.
new_labels = Labels.load_json(new_file_path)
check_labels(new_labels)
def test_label_accessors(centered_pair_labels):
labels = centered_pair_labels
video = labels.videos[0]
assert len(labels.find(video)) == 70
assert labels[video] == labels.find(video)
f = labels.frames(video, from_frame_idx=1)
assert next(f).frame_idx == 15
assert next(f).frame_idx == 31
f = labels.frames(video, from_frame_idx=31, reverse=True)
assert next(f).frame_idx == 15
f = labels.frames(video, from_frame_idx=0, reverse=True)
assert next(f).frame_idx == 1092
next(f)
next(f)
# test that iterator now has fewer items left
assert len(list(f)) == 70 - 3
assert labels.instance_count(video, 15) == 2
assert labels.instance_count(video, 7) == 0
assert labels[0].video == video
assert labels[0].frame_idx == 0
assert labels[61].video == video
assert labels[61].frame_idx == 954
assert labels[np.int64(0)] == labels[0]
assert labels[np.int64(61)] == labels[61]
assert labels[np.array([0, 61])] == labels[[0, 61]]
assert len(labels.find(video, frame_idx=954)) == 1
assert len(labels.find(video, 954)) == 1
assert labels.find(video, 954)[0] == labels[61]
assert labels.find_first(video) == labels[0]
assert labels.find_first(video, 954) == labels[61]
assert labels.find_last(video) == labels[69]
assert labels[video, 954] == labels[61]
assert labels[video, 0] == labels[0]
assert labels[video] == labels.labels
assert len(labels.find(video, 101)) == 0
assert labels.find_first(video, 101) is None
with pytest.raises(KeyError):
labels[video, 101]
dummy_video = Video(backend=MediaVideo)
assert len(labels.find(dummy_video)) == 0
with pytest.raises(KeyError):
labels[dummy_video]
def test_scalar_properties():
# Scalar
dummy_video = Video(backend=MediaVideo)
dummy_skeleton = Skeleton()
dummy_instance = Instance(dummy_skeleton)
dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[dummy_instance])
labels = Labels()
labels.append(dummy_frame)
assert labels.video == dummy_video
assert labels.skeleton == dummy_skeleton
# Empty
labels = Labels()
with pytest.raises(ValueError):
labels.video
with pytest.raises(ValueError):
labels.skeleton
# More than one video
dummy_skeleton = Skeleton()
labels = Labels()
labels.append(
LabeledFrame(
Video(backend=MediaVideo), frame_idx=0, instances=[Instance(dummy_skeleton)]
)
)
labels.append(
LabeledFrame(
Video(backend=MediaVideo), frame_idx=0, instances=[Instance(dummy_skeleton)]
)
)
assert labels.skeleton == dummy_skeleton
with pytest.raises(ValueError):
labels.video
# More than one skeleton
dummy_video = Video(backend=MediaVideo)
labels = Labels()
labels.append(
LabeledFrame(dummy_video, frame_idx=0, instances=[Instance(Skeleton())])
)
labels.append(
LabeledFrame(dummy_video, frame_idx=1, instances=[Instance(Skeleton())])
)
assert labels.video == dummy_video
with pytest.raises(ValueError):
labels.skeleton
def test_has_missing_videos():
labels = Labels()
labels.add_video(Video.from_filename("small_robot.mp4"))
assert labels.has_missing_videos
labels = Labels()
labels.add_video(Video.from_filename("tests/data/videos/small_robot.mp4"))
assert not labels.has_missing_videos
def test_label_mutability():
dummy_video = Video(backend=MediaVideo)
dummy_skeleton = Skeleton()
dummy_instance = Instance(dummy_skeleton)
dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[dummy_instance])
labels = Labels()
labels.append(dummy_frame)
assert dummy_video in labels.videos
assert dummy_video in labels
assert dummy_skeleton in labels.skeletons
assert dummy_skeleton in labels
assert dummy_frame in labels.labeled_frames
assert dummy_frame in labels
assert (dummy_video, 0) in labels
assert (dummy_video, 1) not in labels
dummy_video2 = Video(backend=MediaVideo)
dummy_skeleton2 = Skeleton(name="dummy2")
dummy_instance2 = Instance(dummy_skeleton2)
dummy_frame2 = LabeledFrame(dummy_video2, frame_idx=0, instances=[dummy_instance2])
assert dummy_video2 not in labels
assert dummy_skeleton2 not in labels
assert dummy_frame2 not in labels
labels.append(dummy_frame2)
assert dummy_video2 in labels
assert dummy_frame2 in labels
labels.remove_video(dummy_video2)
assert dummy_video2 not in labels
assert dummy_frame2 not in labels
assert len(labels.find(dummy_video2)) == 0
assert len(labels) == 1
labels.append(LabeledFrame(dummy_video, frame_idx=0))
assert len(labels) == 1
dummy_frames = [LabeledFrame(dummy_video, frame_idx=i) for i in range(10)]
dummy_frames2 = [LabeledFrame(dummy_video2, frame_idx=i) for i in range(10)]
for f in dummy_frames + dummy_frames2:
labels.append(f)
assert len(labels) == 20
labels.remove_video(dummy_video2)
assert len(labels) == 10
assert len(labels.find(dummy_video)) == 10
assert dummy_frame in labels
assert all([label in labels for label in dummy_frames[1:]])
assert dummy_video2 not in labels
assert len(labels.find(dummy_video2)) == 0
assert all([label not in labels for label in dummy_frames2])
labels.remove_video(dummy_video)
assert len(labels.find(dummy_video)) == 0
def test_labels_merge():
dummy_video = Video(backend=MediaVideo)
dummy_skeleton = Skeleton()
dummy_skeleton.add_node("node")
labels = Labels()
dummy_frames = []
# Add 10 instances with different points (so they aren't "redundant")
for i in range(10):
instance = Instance(skeleton=dummy_skeleton, points=dict(node=Point(i, i)))
dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[instance])
dummy_frames.append(dummy_frame)
labels.labeled_frames.extend(dummy_frames)
assert len(labels) == 10
assert len(labels.labeled_frames[0].instances) == 1
labels.merge_matching_frames()
assert len(labels) == 1
assert len(labels.labeled_frames[0].instances) == 10
def test_complex_merge():
dummy_video_a = Video.from_filename("foo.mp4")
dummy_video_b = Video.from_filename("foo.mp4")
dummy_skeleton_a = Skeleton()
dummy_skeleton_a.add_node("node")
dummy_skeleton_b = Skeleton()
dummy_skeleton_b.add_node("node")
dummy_instances_a = []
dummy_instances_a.append(
Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(1, 1)))
)
dummy_instances_a.append(
Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(2, 2)))
)
labels_a = Labels()
labels_a.append(
LabeledFrame(dummy_video_a, frame_idx=0, instances=dummy_instances_a)
)
dummy_instances_b = []
dummy_instances_b.append(
Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(1, 1)))
)
dummy_instances_b.append(
Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(3, 3)))
)
labels_b = Labels()
labels_b.append(
LabeledFrame(dummy_video_b, frame_idx=0, instances=dummy_instances_b)
) # conflict
labels_b.append(
LabeledFrame(dummy_video_b, frame_idx=1, instances=dummy_instances_b)
) # clean
merged, extra_a, extra_b = Labels.complex_merge_between(labels_a, labels_b)
# Check that we have the cleanly merged frame
assert dummy_video_a in merged
assert len(merged[dummy_video_a]) == 1 # one merged frame
assert len(merged[dummy_video_a][1]) == 2 # with two instances
# Check that labels_a includes redundant and clean
assert len(labels_a.labeled_frames) == 2
assert len(labels_a.labeled_frames[0].instances) == 1
assert labels_a.labeled_frames[0].instances[0].points[0].x == 1
assert len(labels_a.labeled_frames[1].instances) == 2
assert labels_a.labeled_frames[1].instances[0].points[0].x == 1
assert labels_a.labeled_frames[1].instances[1].points[0].x == 3
# Check that extra_a/b includes the appropriate conflicting instance
assert len(extra_a) == 1
assert len(extra_b) == 1
assert len(extra_a[0].instances) == 1
assert len(extra_b[0].instances) == 1
assert extra_a[0].instances[0].points[0].x == 2
assert extra_b[0].instances[0].points[0].x == 3
# Check that objects were unified
assert extra_a[0].video == extra_b[0].video
# Check resolving the conflict using new
Labels.finish_complex_merge(labels_a, extra_b)
assert len(labels_a.labeled_frames) == 2
assert len(labels_a.labeled_frames[0].instances) == 2
assert labels_a.labeled_frames[0].instances[1].points[0].x == 3
def test_merge_predictions():
dummy_video_a = Video.from_filename("foo.mp4")
dummy_video_b = Video.from_filename("foo.mp4")
dummy_skeleton_a = Skeleton()
dummy_skeleton_a.add_node("node")
dummy_skeleton_b = Skeleton()
dummy_skeleton_b.add_node("node")
dummy_instances_a = []
dummy_instances_a.append(
Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(1, 1)))
)
dummy_instances_a.append(
Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(2, 2)))
)
labels_a = Labels()
labels_a.append(
LabeledFrame(dummy_video_a, frame_idx=0, instances=dummy_instances_a)
)
dummy_instances_b = []
dummy_instances_b.append(
Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(1, 1)))
)
dummy_instances_b.append(
PredictedInstance(
skeleton=dummy_skeleton_b, points=dict(node=Point(3, 3)), score=1
)
)
labels_b = Labels()
labels_b.append(
LabeledFrame(dummy_video_b, frame_idx=0, instances=dummy_instances_b)
)
# Frames have one redundant instance (perfect match) and all the
# non-matching instances are different types (one predicted, one not).
merged, extra_a, extra_b = Labels.complex_merge_between(labels_a, labels_b)
assert len(merged[dummy_video_a]) == 1
assert len(merged[dummy_video_a][0]) == 1 # the predicted instance was merged
assert not extra_a
assert not extra_b
def test_merge_with_package(min_labels_robot, tmpdir):
# Add a suggestion and save with images.
labels = min_labels_robot
labels.suggestions.append(
sleap.io.dataset.SuggestionFrame(video=labels.video, frame_idx=1)
)
pkg_path = os.path.join(tmpdir, "test.pkg.slp")
assert len(labels.predicted_instances) == 0
labels.save(pkg_path, with_images=True, embed_suggested=True)
# Load package.
labels_pkg = sleap.load_file(pkg_path)
assert isinstance(labels_pkg.video.backend, sleap.io.video.HDF5Video)
assert labels_pkg.video.backend.has_embedded_images
assert isinstance(
labels_pkg.video.backend._source_video.backend, sleap.io.video.MediaVideo
)
assert len(labels_pkg.predicted_instances) == 0
# Add prediction.
inst = labels_pkg.user_instances[0]
pts = inst.numpy()
inst_pr = sleap.PredictedInstance.from_pointsarray(
pts,
skeleton=labels_pkg.skeleton,
point_confidences=np.zeros(len(pts)),
instance_score=1.0,
)
labels_pkg.append(
sleap.LabeledFrame(
video=labels_pkg.suggestions[0].video,
frame_idx=labels_pkg.suggestions[0].frame_idx,
instances=[inst_pr],
)
)
# Save labels without image data.
preds_path = pkg_path + ".predictions.slp"
labels_pkg.save(preds_path)
# Load predicted labels created from package.
labels_pr = sleap.load_file(preds_path)
assert len(labels_pr.predicted_instances) == 1
# Merge with base labels.
base_video_path = labels.video.backend.filename
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
labels, labels_pr
)
assert len(labels.videos) == 1
assert labels.video.backend.filename == base_video_path
assert len(labels.predicted_instances) == 1
assert len(extra_base) == 0
assert len(extra_new) == 0
assert labels.predicted_instances[0].frame.frame_idx == 1
# Merge predictions to package instead.
labels_pkg = sleap.load_file(pkg_path)
labels_pr = sleap.load_file(preds_path)
assert len(labels_pkg.predicted_instances) == 0
base_video_path = labels_pkg.video.backend.filename
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
labels_pkg, labels_pr
)
assert len(labels_pkg.videos) == 1
assert labels_pkg.video.backend.filename == base_video_path
assert len(labels_pkg.predicted_instances) == 1
assert len(extra_base) == 0
assert len(extra_new) == 0
assert labels_pkg.predicted_instances[0].frame.frame_idx == 1
def test_merge_with_skeleton_conflict(min_labels, tmpdir):
# Save out base labels
base_labels = min_labels.copy()
base_labels.save(f"{tmpdir}/base_labels.slp")
# Merge labels with a renamed node
labels = base_labels.copy()
labels[0].frame_idx = 1
labels.skeleton.relabel_node("A", "a")
labels.save(f"{tmpdir}/labels.renamed_node.slp")
labels = base_labels.copy()
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
labels, sleap.load_file(f"{tmpdir}/labels.renamed_node.slp")
)
assert len(extra_base) == 0
assert len(extra_new) == 0
assert labels.skeleton.node_names == ["A", "B", "a"]
assert np.isnan(labels[0][0].numpy()).any(axis=1).tolist() == [False, False, True]
assert np.isnan(labels[1][0].numpy()).any(axis=1).tolist() == [True, False, False]
# Merge labels with a new node
labels = base_labels.copy()
labels[0].frame_idx = 1
labels.skeleton.add_node("C")
inst = labels[0][0]
inst["C"] = sleap.instance.Point(x=1, y=2, visible=True)
labels.save(f"{tmpdir}/labels.new_node.slp")
labels = base_labels.copy()
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
labels, sleap.load_file(f"{tmpdir}/labels.new_node.slp")
)
assert len(extra_base) == 0
assert len(extra_new) == 0
assert labels.skeleton.node_names == ["A", "B", "C"]
assert np.isnan(labels[0][0].numpy()).any(axis=1).tolist() == [False, False, True]
assert np.isnan(labels[1][0].numpy()).any(axis=1).tolist() == [False, False, False]
# Merge labels with a deleted node
labels = base_labels.copy()
labels[0].frame_idx = 1
labels.skeleton.delete_node("A")
labels.save(f"{tmpdir}/labels.deleted_node.slp")
labels = base_labels.copy()
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
labels, sleap.load_file(f"{tmpdir}/labels.deleted_node.slp")
)
assert len(extra_base) == 0
assert len(extra_new) == 0
assert labels.skeleton.node_names == ["A", "B"]
assert np.isnan(labels[0][0].numpy()).any(axis=1).tolist() == [False, False]
assert np.isnan(labels[1][0].numpy()).any(axis=1).tolist() == [True, False]
assert (labels[0][0].numpy()[1] == labels[1][0].numpy()[1]).all()
def skeleton_ids_from_label_instances(labels):
return list(map(id, (lf.instances[0].skeleton for lf in labels.labeled_frames)))
def test_duplicate_skeletons_serializing():
vid = Video.from_filename("foo.mp4")
skeleton_a = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
skeleton_b = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
lf_a = LabeledFrame(vid, frame_idx=2, instances=[Instance(skeleton_a)])
lf_b = LabeledFrame(vid, frame_idx=3, instances=[Instance(skeleton_b)])
new_labels = Labels(labeled_frames=[lf_a, lf_b])
new_labels_json = new_labels.to_dict()
def test_distinct_skeletons_serializing():
vid = Video.from_filename("foo.mp4")
skeleton_a = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
skeleton_b = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
skeleton_b.add_node("foo")
lf_a = LabeledFrame(vid, frame_idx=2, instances=[Instance(skeleton_a)])
lf_b = LabeledFrame(vid, frame_idx=3, instances=[Instance(skeleton_b)])
new_labels = Labels(labeled_frames=[lf_a, lf_b])
# Make sure we can serialize this
new_labels_json = new_labels.to_dict()
def test_unify_skeletons():
vid = Video.from_filename("foo.mp4")
skeleton_a = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
skeleton_b = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
lf_a = LabeledFrame(vid, frame_idx=2, instances=[Instance(skeleton_a)])
lf_b = LabeledFrame(vid, frame_idx=3, instances=[Instance(skeleton_b)])
labels = Labels()
labels.extend_from([lf_a], unify=True)
labels.extend_from([lf_b], unify=True)
ids = skeleton_ids_from_label_instances(labels)
# Make sure that skeleton_b got replaced with skeleton_a when we
# added the frame with "unify" set
assert len(set(ids)) == 1
# Make sure we can serialize this
labels.to_dict()
def test_dont_unify_skeletons():
vid = Video.from_filename("foo.mp4")
skeleton_a = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
skeleton_b = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
lf_a = LabeledFrame(vid, frame_idx=2, instances=[Instance(skeleton_a)])
lf_b = LabeledFrame(vid, frame_idx=3, instances=[Instance(skeleton_b)])
labels = Labels(labeled_frames=[lf_a])
labels.extend_from([lf_b], unify=False)
ids = skeleton_ids_from_label_instances(labels)
# Make sure we still have two distinct skeleton objects
assert len(set(ids)) == 2
# Make sure we can serialize this
labels.to_dict()
def test_instance_access():
labels = Labels()
dummy_skeleton = Skeleton()
dummy_video = Video(backend=MediaVideo)
dummy_video2 = Video(backend=MediaVideo)
for i in range(10):
labels.append(
LabeledFrame(
dummy_video,
frame_idx=i,
instances=[Instance(dummy_skeleton), Instance(dummy_skeleton)],
)
)
for i in range(10):
labels.append(
LabeledFrame(
dummy_video2,
frame_idx=i,
instances=[
Instance(dummy_skeleton),
Instance(dummy_skeleton),
Instance(dummy_skeleton),
],
)
)
assert len(labels.all_instances) == 50
assert len(list(labels.instances(video=dummy_video))) == 20
assert len(list(labels.instances(video=dummy_video2))) == 30
def test_basic_suggestions(small_robot_mp4_vid):
dummy_video = small_robot_mp4_vid
dummy_skeleton = Skeleton()
dummy_instance = Instance(dummy_skeleton)
dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[dummy_instance])
labels = Labels()
labels.append(dummy_frame)
suggestions = VideoFrameSuggestions.suggest(
labels=labels, params=dict(method="sample", per_video=13)
)
labels.set_suggestions(suggestions)
assert len(labels.get_video_suggestions(dummy_video)) == 13
def test_deserialize_suggestions(small_robot_mp4_vid, tmpdir):
dummy_video = small_robot_mp4_vid
dummy_skeleton = Skeleton()
dummy_instance = Instance(dummy_skeleton)
dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[dummy_instance])
labels = Labels()
labels.append(dummy_frame)
suggestions = VideoFrameSuggestions.suggest(
labels=labels, params=dict(method="sample", per_video=13)
)
labels.set_suggestions(suggestions)
filename = os.path.join(tmpdir, "new_suggestions.h5")
Labels.save_file(filename=filename, labels=labels)
new_suggestion_labels = Labels.load_file(filename)
assert len(suggestions) == len(new_suggestion_labels.suggestions)
assert [frame.frame_idx for frame in suggestions] == [
frame.frame_idx for frame in new_suggestion_labels.suggestions
]
def test_load_labels_mat(mat_labels):
assert len(mat_labels.nodes) == 6
assert len(mat_labels) == 43
@pytest.mark.parametrize("format", ["png", "mjpeg/avi"])
def test_save_labels_with_frame_data(multi_skel_vid_labels, tmpdir, format):
"""
Test saving and loading a labels dataset with frame data included
as JSON.
"""
    # Let's take a subset of the labels so this doesn't take too long
multi_skel_vid_labels.labeled_frames = multi_skel_vid_labels.labeled_frames[5:30]
filename = os.path.join(tmpdir, "test.json")
Labels.save_json(
multi_skel_vid_labels,
filename=filename,
save_frame_data=True,
frame_data_format=format,
# compress=True,
)
print(filename, os.path.exists(filename + ".zip"))
# Load the data back in
loaded_labels = Labels.load_json(f"{filename}.zip")
# Check that we have the same thing
_check_labels_match(multi_skel_vid_labels, loaded_labels, format=format)
# Make sure we can load twice
loaded_labels = Labels.load_json(f"{filename}.zip")
def test_save_labels_and_frames_hdf5(multi_skel_vid_labels, tmpdir):
    # Let's take a subset of the labels so this doesn't take too long
labels = multi_skel_vid_labels
labels.labeled_frames = labels.labeled_frames[5:30]
filename = os.path.join(tmpdir, "test.h5")
Labels.save_hdf5(filename=filename, labels=labels, save_frame_data=True)
loaded_labels = Labels.load_hdf5(filename=filename)
_check_labels_match(labels, loaded_labels)
# Rename file (after closing videos)
for vid in loaded_labels.videos:
vid.close()
filerename = os.path.join(tmpdir, "test_rename.h5")
os.rename(filename, filerename)
    # Make sure we can still open the file after renaming it
loaded_labels = Labels.load_hdf5(filename=filerename)
def test_save_frame_data_hdf5(min_labels_slp, tmpdir):
labels = Labels(min_labels_slp.labeled_frames)
labels.append(LabeledFrame(video=labels.video, frame_idx=1))
labels.suggestions.append(SuggestionFrame(video=labels.video, frame_idx=2))
fn = os.path.join(tmpdir, "test_user_only.slp")
labels.save_frame_data_hdf5(
fn,
format="png",
user_labeled=True,
all_labeled=False,
suggested=False,
)
assert Video.from_filename(fn, dataset="video0").embedded_frame_inds == [0]
fn = os.path.join(tmpdir, "test_all_labeled.slp")
labels.save_frame_data_hdf5(
fn,
format="png",
user_labeled=False,
all_labeled=True,
suggested=False,
)
assert Video.from_filename(fn, dataset="video0").embedded_frame_inds == [0, 1]
fn = os.path.join(tmpdir, "test_suggested.slp")
labels.save_frame_data_hdf5(
fn,
format="png",
user_labeled=False,
all_labeled=False,
suggested=True,
)
assert Video.from_filename(fn, dataset="video0").embedded_frame_inds == [2]
fn = os.path.join(tmpdir, "test_all.slp")
labels.save_frame_data_hdf5(
fn,
format="png",
user_labeled=False,
all_labeled=True,
suggested=True,
)
assert Video.from_filename(fn, dataset="video0").embedded_frame_inds == [0, 1, 2]
def test_save_labels_with_images(min_labels_slp, tmpdir):
labels = Labels(min_labels_slp.labeled_frames)
labels.append(LabeledFrame(video=labels.video, frame_idx=1))
labels.suggestions.append(SuggestionFrame(video=labels.video, frame_idx=2))
fn = os.path.join(tmpdir, "test_user_only.slp")
labels.save(
fn,
with_images=True,
embed_all_labeled=False,
embed_suggested=False,
)
assert Labels.load_file(fn).video.embedded_frame_inds == [0]
fn = os.path.join(tmpdir, "test_all_labeled.slp")
labels.save(
fn,
with_images=True,
embed_all_labeled=True,
embed_suggested=False,
)
assert Labels.load_file(fn).video.embedded_frame_inds == [0, 1]
fn = os.path.join(tmpdir, "test_suggested.slp")
labels.save(
fn,
with_images=True,
embed_all_labeled=False,
embed_suggested=True,
)
assert Labels.load_file(fn).video.embedded_frame_inds == [0, 2]
fn = os.path.join(tmpdir, "test_all.slp")
labels.save(
fn,
with_images=True,
embed_all_labeled=True,
embed_suggested=True,
)
assert Labels.load_file(fn).video.embedded_frame_inds == [0, 1, 2]
def test_labels_hdf5(multi_skel_vid_labels, tmpdir):
labels = multi_skel_vid_labels
filename = os.path.join(tmpdir, "test.h5")
Labels.save_hdf5(filename=filename, labels=labels)
loaded_labels = Labels.load_hdf5(filename=filename)
_check_labels_match(labels, loaded_labels)
def test_labels_predicted_hdf5(multi_skel_vid_labels, tmpdir):
labels = multi_skel_vid_labels
filename = os.path.join(tmpdir, "test.h5")
    # Let's promote some of these Instances to predicted instances
for label in labels:
for i, instance in enumerate(label.instances):
if i % 2 == 0:
label.instances[i] = PredictedInstance.from_instance(instance, 0.3)
    # Let's also add some from_predicted values
for label in labels:
label.instances[1].from_predicted = label.instances[0]
# Try adding a node to the skeleton
labels.skeletons[0].add_node("new node")
# Save and compare the results
Labels.save_hdf5(filename=filename, labels=labels)
loaded_labels = Labels.load_hdf5(filename=filename)
_check_labels_match(labels, loaded_labels)
# Try deleting nodes from the skeleton
node = labels.skeletons[0].nodes[-1]
labels.skeletons[0].delete_node(node)
node = labels.skeletons[0].nodes[-1]
labels.skeletons[0].delete_node(node)
# Save and compare the results
Labels.save_hdf5(filename=filename, labels=labels)
loaded_labels = Labels.load_hdf5(filename=filename)
_check_labels_match(labels, loaded_labels)
def test_labels_append_hdf5(multi_skel_vid_labels, tmpdir):
labels = multi_skel_vid_labels
filename = os.path.join(tmpdir, "test.h5")
# Save each frame of the Labels dataset one by one in append
# mode
for label in labels:
# Just do the first 20 to speed things up
if label.frame_idx > 20:
break
Labels.save_hdf5(filename=filename, labels=Labels([label]), append=True)
# Now load the dataset and make sure we get the same thing we started
# with.
loaded_labels = Labels.load_hdf5(filename=filename)
_check_labels_match(labels, loaded_labels)
def test_hdf5_from_predicted(multi_skel_vid_labels, tmpdir):
labels = multi_skel_vid_labels
filename = os.path.join(tmpdir, "test.h5")
# Add some predicted instances to create from_predicted links
for frame_num, frame in enumerate(labels):
if frame_num % 20 == 0:
frame.instances[0].from_predicted = PredictedInstance.from_instance(
frame.instances[0], float(frame_num)
)
frame.instances.append(frame.instances[0].from_predicted)
# Save and load, compare the results
Labels.save_hdf5(filename=filename, labels=labels)
loaded_labels = Labels.load_hdf5(filename=filename)
for frame_num, frame in enumerate(loaded_labels):
if frame_num % 20 == 0:
assert frame.instances[0].from_predicted.score == float(frame_num)
def test_hdf5_empty_save(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
Labels.save_hdf5(filename=filename, labels=labels)
dummy_video = Video.from_filename("foo.mp4")
labels.videos.append(dummy_video)
Labels.save_hdf5(filename=filename, labels=labels)
def test_makedirs(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "new/dirs/test.h5")
Labels.save_file(filename=filename, labels=labels)
def test_multivideo_tracks():
vid_a = Video.from_filename("foo.mp4")
vid_b = Video.from_filename("bar.mp4")
skeleton = Skeleton.load_json("tests/data/skeleton/fly_skeleton_legs.json")
track_a = Track(spawned_on=2, name="A")
track_b = Track(spawned_on=3, name="B")
inst_a = Instance(track=track_a, skeleton=skeleton)
inst_b = Instance(track=track_b, skeleton=skeleton)
lf_a = LabeledFrame(vid_a, frame_idx=2, instances=[inst_a])
lf_b = LabeledFrame(vid_b, frame_idx=3, instances=[inst_b])
labels = Labels(labeled_frames=[lf_a, lf_b])
# Try setting video B instance to track used in video A
labels.track_swap(vid_b, new_track=track_a, old_track=track_b, frame_range=(3, 4))
assert inst_b.track == track_a
def test_many_tracks_hdf5(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
labels.tracks = [Track(spawned_on=i, name=f"track {i}") for i in range(4000)]
Labels.save_hdf5(filename=filename, labels=labels)
def test_many_videos_hdf5(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
labels.videos = [Video.from_filename(f"video {i}.mp4") for i in range(3000)]
Labels.save_hdf5(filename=filename, labels=labels)
def test_many_suggestions_hdf5(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
video = Video.from_filename("foo.mp4")
labels.videos = [video]
labels.suggestions = [SuggestionFrame(video, i) for i in range(3000)]
Labels.save_hdf5(filename=filename, labels=labels)
def test_path_fix(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
# Add a video without a full path
labels.add_video(Video.from_filename("small_robot.mp4"))
Labels.save_hdf5(filename=filename, labels=labels)
# Pass the path to the directory with the video
labels = Labels.load_file(filename, video_search="tests/data/videos/")
# Make sure we got the actual video path by searching that directory
assert len(labels.videos) == 1
assert labels.videos[0].filename == "tests/data/videos/small_robot.mp4"
def test_path_fix_with_new_full_path(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
# Add video with bad filename
labels.add_video(Video.from_filename("foo.mp4"))
Labels.save_hdf5(filename=filename, labels=labels)
# Pass list of full video paths to use instead of those in labels
labels = Labels.load_file(
filename, video_search=["tests/data/videos/small_robot.mp4"]
)
# Make sure we got the actual video path by searching that directory
assert len(labels.videos) == 1
assert labels.videos[0].filename == "tests/data/videos/small_robot.mp4"
def test_load_file(tmpdir):
labels = Labels()
filename = os.path.join(tmpdir, "test.h5")
labels.add_video(Video.from_filename("small_robot.mp4"))
Labels.save_hdf5(filename=filename, labels=labels)
# Fix video path from full path
labels = load_file(filename, search_paths="tests/data/videos/small_robot.mp4")
assert Path(labels.video.filename).samefile("tests/data/videos/small_robot.mp4")
# No auto-detect
labels = load_file(filename, detect_videos=False)
assert labels.video.filename == "small_robot.mp4"
# Fix video path by searching in the labels folder
tmpvid = tmpdir.join("small_robot.mp4")
tmpvid.write("") # dummy file
assert load_file(filename).video.filename == tmpvid
assert load_file(filename, search_paths=str(tmpdir)).video.filename == tmpvid
assert load_file(filename, search_paths=str(tmpvid)).video.filename == tmpvid
def test_local_path_save(tmpdir, monkeypatch):
filename = "test.h5"
# Set current working directory (monkeypatch isolates other tests)
monkeypatch.chdir(tmpdir)
# Try saving with relative path
Labels.save_file(filename=filename, labels=Labels())
assert os.path.exists(os.path.join(tmpdir, filename))
def test_slp_file(min_labels_slp, min_labels):
assert min_labels.videos[0].filename == min_labels_slp.videos[0].filename
def test_provenance(tmpdir):
labels = Labels(provenance=dict(source="test_provenance"))
filename = os.path.join(tmpdir, "test.slp")
# Add a video without a full path
labels.add_video(Video.from_filename("small_robot.mp4"))
Labels.save_file(filename=filename, labels=labels)
labels = Labels.load_file(filename)
print(labels.provenance)
assert labels.provenance["source"] == "test_provenance"
def test_has_frame():
video = Video(backend=MediaVideo)
labels = Labels([LabeledFrame(video=video, frame_idx=0)])
assert labels.has_frame(labels[0])
assert labels.has_frame(labels[0], use_cache=False)
assert labels.has_frame(LabeledFrame(video=video, frame_idx=0))
assert labels.has_frame(video=video, frame_idx=0)
assert labels.has_frame(video=video, frame_idx=0, use_cache=False)
assert not labels.has_frame(LabeledFrame(video=video, frame_idx=1))
assert not labels.has_frame(LabeledFrame(video=video, frame_idx=1), use_cache=False)
assert not labels.has_frame(video=video, frame_idx=1)
with pytest.raises(ValueError):
labels.has_frame()
with pytest.raises(ValueError):
labels.has_frame(video=video)
with pytest.raises(ValueError):
labels.has_frame(frame_idx=1)
@pytest.fixture
def removal_test_labels():
skeleton = Skeleton()
video = Video(backend=MediaVideo(filename="test"))
lf_user_only = LabeledFrame(
video=video, frame_idx=0, instances=[Instance(skeleton=skeleton)]
)
lf_pred_only = LabeledFrame(
video=video, frame_idx=1, instances=[PredictedInstance(skeleton=skeleton)]
)
lf_both = LabeledFrame(
video=video,
frame_idx=2,
instances=[Instance(skeleton=skeleton), PredictedInstance(skeleton=skeleton)],
)
labels = Labels([lf_user_only, lf_pred_only, lf_both])
return labels
def test_copy(removal_test_labels):
new_labels = removal_test_labels.copy()
new_labels[0].instances = []
new_labels.remove_frame(new_labels[-1])
assert len(removal_test_labels[0].instances) == 1
assert len(removal_test_labels) == 3
def test_remove_user_instances(removal_test_labels):
labels = removal_test_labels
assert len(labels) == 3
labels.remove_user_instances()
assert len(labels) == 2
assert labels[0].frame_idx == 1
assert not labels[0].has_user_instances
assert labels[0].has_predicted_instances
assert labels[1].frame_idx == 2
assert not labels[1].has_user_instances
assert labels[1].has_predicted_instances
def test_remove_user_instances_with_new_labels(removal_test_labels):
labels = removal_test_labels
assert len(labels) == 3
new_labels = Labels(
[
LabeledFrame(
video=labels.video,
frame_idx=0,
instances=[Instance(skeleton=labels.skeleton)],
)
]
)
labels.remove_user_instances(new_labels=new_labels)
assert len(labels) == 2
assert labels[0].frame_idx == 1
assert not labels[0].has_user_instances
assert labels[0].has_predicted_instances
assert labels[1].frame_idx == 2
assert labels[1].has_user_instances
assert labels[1].has_predicted_instances
def test_remove_predictions(removal_test_labels):
labels = removal_test_labels
assert len(labels) == 3
labels.remove_predictions()
assert len(labels) == 2
assert labels[0].frame_idx == 0
assert labels[0].has_user_instances
assert not labels[0].has_predicted_instances
assert labels[1].frame_idx == 2
assert labels[1].has_user_instances
assert not labels[1].has_predicted_instances
def test_remove_predictions_with_new_labels(removal_test_labels):
labels = removal_test_labels
assert len(labels) == 3
new_labels = Labels(
[
LabeledFrame(
video=labels.video,
frame_idx=1,
instances=[PredictedInstance(skeleton=labels.skeleton)],
)
]
)
labels.remove_predictions(new_labels=new_labels)
assert len(labels) == 2
assert labels[0].frame_idx == 0
assert labels[0].has_user_instances
assert not labels[0].has_predicted_instances
assert labels[1].frame_idx == 2
assert labels[1].has_user_instances
assert labels[1].has_predicted_instances
def test_labels_numpy(centered_pair_predictions):
trx = centered_pair_predictions.numpy(video=None, all_frames=False, untracked=False)
assert trx.shape == (1100, 27, 24, 2)
trx = centered_pair_predictions.numpy(video=None, all_frames=True, untracked=False)
assert trx.shape == (1100, 27, 24, 2)
# Remove the first labeled frame
centered_pair_predictions.remove_frame(centered_pair_predictions[0])
assert len(centered_pair_predictions) == 1099
trx = centered_pair_predictions.numpy(video=None, all_frames=False, untracked=False)
assert trx.shape == (1099, 27, 24, 2)
trx = centered_pair_predictions.numpy(video=None, all_frames=True, untracked=False)
assert trx.shape == (1100, 27, 24, 2)
labels_single = Labels(
[
LabeledFrame(
video=lf.video, frame_idx=lf.frame_idx, instances=[lf.instances[0]]
)
for lf in centered_pair_predictions
]
)
assert labels_single.numpy().shape == (1100, 1, 24, 2)
assert centered_pair_predictions.numpy(untracked=True).shape == (1100, 5, 24, 2)
for lf in centered_pair_predictions:
for inst in lf:
inst.track = None
centered_pair_predictions.tracks = []
assert centered_pair_predictions.numpy(untracked=False).shape == (1100, 0, 24, 2)
def test_remove_track(centered_pair_predictions):
labels = centered_pair_predictions
track = labels.tracks[-1]
track_insts = [inst for inst in labels.instances() if inst.track == track]
labels.remove_track(track)
assert track not in labels.tracks
assert all(inst.track != track for inst in labels.instances())
track = labels.tracks[0]
track_insts = [inst for inst in labels.instances() if inst.track == track]
labels.remove_track(track)
assert track not in labels.tracks
assert all(inst.track != track for inst in labels.instances())
def test_remove_all_tracks(centered_pair_predictions):
labels = centered_pair_predictions
labels.remove_all_tracks()
assert len(labels.tracks) == 0
assert all(inst.track is None for inst in labels.instances())
def test_remove_empty_frames(min_labels):
min_labels.append(sleap.LabeledFrame(video=min_labels.video, frame_idx=2))
assert len(min_labels) == 2
assert len(min_labels[-1]) == 0
min_labels.remove_empty_frames()
assert len(min_labels) == 1
assert len(min_labels[0]) == 2
def test_remove_empty_instances(min_labels):
for inst in min_labels.labeled_frames[0].instances:
for pt in inst.points:
pt.visible = False
min_labels.remove_empty_instances(keep_empty_frames=True)
assert len(min_labels) == 1
assert len(min_labels[0]) == 0
def test_remove_empty_instances_and_frames(min_labels):
for inst in min_labels.labeled_frames[0].instances:
for pt in inst.points:
pt.visible = False
min_labels.remove_empty_instances(keep_empty_frames=False)
assert len(min_labels) == 0
def test_merge_nodes(min_labels):
labels = min_labels.copy()
labels.skeleton.add_node("a")
inst = labels[0][0]
inst["A"] = Point(x=np.nan, y=np.nan, visible=False)
inst["a"] = Point(x=1, y=2, visible=True)
inst = labels[0][1]
inst["A"] = Point(x=0, y=1, visible=False)
inst["a"] = Point(x=1, y=2, visible=True)
labels.merge_nodes("A", "a")
assert labels.skeleton.node_names == ["A", "B"]
inst = labels[0][0]
assert inst["A"].x == 1 and inst["A"].y == 2
assert len(inst.nodes) == 2
inst = labels[0][1]
assert inst["A"].x == 1 and inst["A"].y == 2
assert len(inst.nodes) == 2
def test_split(centered_pair_predictions):
labels_a, labels_b = centered_pair_predictions.split(0.8)
assert len(labels_a) == 880
assert len(labels_b) == 220
assert (
len(
np.intersect1d(
[lf.frame_idx for lf in labels_a], [lf.frame_idx for lf in labels_b]
)
)
== 0
)
labels_a, labels_b = centered_pair_predictions.extract([0]).split(0.8)
assert len(labels_a) == 1
assert len(labels_b) == 1
assert labels_a[0] != labels_b[0]
assert labels_a[0].frame_idx == labels_b[0].frame_idx
labels_a, labels_b = centered_pair_predictions.extract([0]).split(0.8, copy=False)
assert len(labels_a) == 1
assert len(labels_b) == 1
assert labels_a[0] == labels_b[0]
def test_remove_untracked_instances(min_tracks_2node_labels):
"""Test removal of untracked instances and empty frames.
Args:
min_tracks_2node_labels: Labels object which contains user labeled frames with
tracked instances.
"""
labels = min_tracks_2node_labels
# Preprocessing
labels.labeled_frames[0].instances[0].track = None
labels.labeled_frames[1].instances = []
assert any(
[inst.track is None for lf in labels.labeled_frames for inst in lf.instances]
)
assert any([len(lf.instances) == 0 for lf in labels.labeled_frames])
# Test function with remove_empty_frames=False
labels.remove_untracked_instances(remove_empty_frames=False)
assert all(
[
inst.track is not None
for lf in labels.labeled_frames
for inst in lf.instances
]
)
assert any([len(lf.instances) == 0 for lf in labels.labeled_frames])
# Test function with remove_empty_frames=True
labels.remove_untracked_instances(remove_empty_frames=True)
assert all([len(lf.instances) > 0 for lf in labels.labeled_frames])
| 32.955357
| 88
| 0.689348
|
bf221ba2abcaa8454e9ce013125d72d90a2e8759
| 8,551
|
py
|
Python
|
htmldoom/elements.py
|
sayanarijit/htmldoom
|
c8e1528a35c5117db577c5c54f7e092e8e99222a
|
[
"MIT"
] | 43
|
2019-05-27T12:40:34.000Z
|
2021-11-15T09:52:47.000Z
|
htmldoom/elements.py
|
sayanarijit/htmldoom
|
c8e1528a35c5117db577c5c54f7e092e8e99222a
|
[
"MIT"
] | 44
|
2019-05-25T19:00:35.000Z
|
2019-11-16T19:05:57.000Z
|
htmldoom/elements.py
|
sayanarijit/htmldoom
|
c8e1528a35c5117db577c5c54f7e092e8e99222a
|
[
"MIT"
] | 5
|
2019-06-23T14:32:06.000Z
|
2020-06-20T18:18:26.000Z
|
"""All the elements that resides in an HTML DOM.
Example:
>>> from htmldoom import render, elements as e
    >>> render(e.p(class_="someclass")("This is a paragraph"))
<p class="someclass">This is a paragraph</p>
"""
from htmldoom.base import composite_tag, leaf_tag
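# A rough sketch of what the two factories imported above produce (the rendered
# markup shown here is an assumption based on their names and the docstring
# example, not verified against htmldoom's docs):
#   composite_tag("p") builds elements that take attributes and then children,
#       e.g. render(p(class_="x")("hi")) -> '<p class="x">hi</p>'
#   leaf_tag("br") builds void elements that take attributes only,
#       e.g. render(br()) -> '<br />'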
__all__ = [
"a",
"abbr",
"address",
"animate",
"animateMotion",
"animateTransform",
"area",
"article",
"aside",
"audio",
"b",
"base",
"bdi",
"bdo",
"blockquote",
"body",
"br",
"button",
"canvas",
"caption",
"center",
"circle",
"circlePath",
"cite",
"code",
"col",
"colgroup",
"color_profile",
"data",
"datalist",
"dd",
"defs",
"del_",
"desc",
"details",
"dfn",
"dialog",
"discard",
"div",
"dl",
"dt",
"ellipse",
"em",
"embed",
"feBlend",
"feColorMatrix",
"feComponentTransfer",
"feComposite",
"feConvolveMatrix",
"feDiffuseLighting",
"feDisplacementMap",
"feDistantLight",
"feDropShadow",
"feFlood",
"feFuncA",
"feFuncB",
"feFuncG",
"feFuncR",
"feGaussianBlur",
"feImage",
"feMerge",
"feMergeNode",
"feMorphology",
"feOffset",
"fePointLight",
"feSpecularLighting",
"feSpotLight",
"feTile",
"feTurbulence",
"fieldset",
"figcaption",
"figure",
"filter_",
"footer",
"foreignObject",
"form",
"g",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"hatch",
"hatchpath",
"head",
"header",
"hr",
"html",
"i",
"iframe",
"image",
"img",
"input_",
"ins",
"kbd",
"label",
"legend",
"li",
"line",
"linearGradient",
"link",
"main",
"map_",
"mark",
"marker",
"mask",
"meta",
"metadata",
"meter",
"mpath",
"nav",
"nobr",
"noscript",
"object_",
"ol",
"optgroup",
"option",
"output",
"p",
"param",
"path",
"pattern",
"picture",
"polygon",
"polyline",
"pre",
"progress",
"q",
"radialGradient",
"rect",
"rp",
"rt",
"ruby",
"s",
"samp",
"script",
"section",
"select",
"set_",
"small",
"solidcolor",
"source",
"span",
"stop",
"strong",
"style",
"sub",
"summary",
"sup",
"svg",
"switch",
"symbol",
"table",
"tbody",
"td",
"template",
"text",
"textarea",
"textPath",
"tfoot",
"th",
"thead",
"time",
"title",
"tr",
"track",
"tspan",
"u",
"ul",
"use",
"var",
"view",
"video",
"wbr",
]
a = composite_tag("a")
abbr = composite_tag("abbr")
address = composite_tag("address")
animate = composite_tag("animate")
animateMotion = composite_tag("animateMotion")
animateTransform = composite_tag("animateTransform")
area = leaf_tag("area")
article = composite_tag("article")
aside = composite_tag("aside")
audio = composite_tag("audio")
b = composite_tag("b")
base = leaf_tag("base")
bdi = composite_tag("bdi")
bdo = composite_tag("bdo")
blockquote = composite_tag("blockquote")
body = composite_tag("body")
br = leaf_tag("br")
button = composite_tag("button")
canvas = composite_tag("canvas")
caption = composite_tag("caption")
center = composite_tag("center")
circle = composite_tag("circle")
circlePath = composite_tag("circlePath")
cite = composite_tag("cite")
code = composite_tag("code")
col = leaf_tag("col")
colgroup = composite_tag("colgroup")
color_profile = composite_tag("color-profile")
data = composite_tag("data")
datalist = composite_tag("datalist")
dd = composite_tag("dd")
defs = composite_tag("defs")
del_ = composite_tag("del")
desc = composite_tag("desc")
details = composite_tag("details")
dfn = composite_tag("dfn")
dialog = composite_tag("dialog")
discard = composite_tag("discard")
div = composite_tag("div")
dl = composite_tag("dl")
dt = composite_tag("dt")
ellipse = composite_tag("ellipse")
em = composite_tag("em")
embed = composite_tag("embed")
feBlend = composite_tag("feBlend")
feColorMatrix = composite_tag("feColorMatrix")
feComponentTransfer = composite_tag("feComponentTransfer")
feComposite = composite_tag("feComposite")
feConvolveMatrix = composite_tag("feConvolveMatrix")
feDiffuseLighting = composite_tag("feDiffuseLighting")
feDisplacementMap = composite_tag("feDisplacementMap")
feDistantLight = composite_tag("feDistantLight")
feDropShadow = composite_tag("feDropShadow")
feFlood = composite_tag("feFlood")
feFuncA = composite_tag("feFuncA")
feFuncB = composite_tag("feFuncB")
feFuncG = composite_tag("feFuncG")
feFuncR = composite_tag("feFuncR")
feGaussianBlur = composite_tag("feGaussianBlur")
feImage = composite_tag("feImage")
feMerge = composite_tag("feMerge")
feMergeNode = composite_tag("feMergeNode")
feMorphology = composite_tag("feMorphology")
feOffset = composite_tag("feOffset")
fePointLight = composite_tag("fePointLight")
feSpecularLighting = composite_tag("feSpecularLighting")
feSpotLight = composite_tag("feSpotLight")
feTile = composite_tag("feTile")
feTurbulence = composite_tag("feTurbulence")
fieldset = composite_tag("fieldset")
figcaption = composite_tag("figcaption")
figure = composite_tag("figure")
filter_ = composite_tag("filter")
footer = composite_tag("footer")
foreignObject = leaf_tag("foreignObject")
form = composite_tag("form")
g = composite_tag("g")
h1 = composite_tag("h1")
h2 = composite_tag("h2")
h3 = composite_tag("h3")
h4 = composite_tag("h4")
h5 = composite_tag("h5")
h6 = composite_tag("h6")
hatch = composite_tag("hatch")
hatchpath = composite_tag("hatchpath")
head = composite_tag("head")
header = composite_tag("header")
hr = leaf_tag("hr")
html = composite_tag("html")
i = composite_tag("i")
iframe = composite_tag("iframe")
image = composite_tag("image")
img = leaf_tag("img")
input_ = leaf_tag("input")
ins = composite_tag("ins")
kbd = composite_tag("kbd")
label = composite_tag("label")
legend = composite_tag("legend")
li = composite_tag("li")
line = composite_tag("line")
linearGradient = composite_tag("linearGradient")
link = leaf_tag("link")
main = composite_tag("main")
map_ = composite_tag("map")
mark = composite_tag("mark")
marker = composite_tag("marker")
mask = composite_tag("mask")
meta = leaf_tag("meta")
metadata = composite_tag("metadata")
meter = leaf_tag("meter")
mpath = composite_tag("mpath")
nav = composite_tag("nav")
nobr = composite_tag("nobr")
noscript = composite_tag("noscript")
object_ = composite_tag("object")
ol = composite_tag("ol")
optgroup = composite_tag("optgroup")
option = composite_tag("option")
output = composite_tag("output")
p = composite_tag("p")
param = leaf_tag("param")
path = composite_tag("path")
pattern = composite_tag("pattern")
picture = composite_tag("picture")
polygon = composite_tag("polygon")
polyline = composite_tag("polyline")
pre = composite_tag("pre")
progress = composite_tag("progress")
q = composite_tag("q")
radialGradient = composite_tag("radialGradient")
rect = composite_tag("rect")
rp = composite_tag("rp")
rt = composite_tag("rt")
ruby = composite_tag("ruby")
s = composite_tag("s")
samp = composite_tag("samp")
script = composite_tag("script")
section = composite_tag("section")
select = composite_tag("select")
set_ = composite_tag("set")
small = composite_tag("small")
solidcolor = composite_tag("solidcolor")
source = leaf_tag("source")
span = composite_tag("span")
stop = composite_tag("stop")
strong = composite_tag("strong")
style = composite_tag("style")
sub = composite_tag("sub")
summary = composite_tag("summary")
sup = composite_tag("sup")
svg = composite_tag("svg")
switch = composite_tag("switch")
symbol = composite_tag("symbol")
table = composite_tag("table")
tbody = composite_tag("tbody")
td = composite_tag("td")
template = composite_tag("template")
text = composite_tag("text")
textarea = composite_tag("textarea")
textPath = composite_tag("textPath")
tfoot = composite_tag("tfoot")
th = composite_tag("th")
thead = composite_tag("thead")
time = composite_tag("time")
title = composite_tag("title")
tr = composite_tag("tr")
track = leaf_tag("track")
tspan = composite_tag("tspan")
u = composite_tag("u")
ul = composite_tag("ul")
use = composite_tag("use")
var = composite_tag("var")
view = composite_tag("view")
video = composite_tag("video")
wbr = leaf_tag("wbr")
| 15.894052
| 62
| 0.641212
|
1db37851d40614c8a66257276f430a515b09822a
| 58,075
|
py
|
Python
|
test/engine/test_reflection.py
|
Thhhza/sqlalchemy
|
f2b267043e17b2b769dc2a5b8139f6be2a3d4e84
|
[
"MIT"
] | 1
|
2015-11-07T12:34:26.000Z
|
2015-11-07T12:34:26.000Z
|
test/engine/test_reflection.py
|
Thhhza/sqlalchemy
|
f2b267043e17b2b769dc2a5b8139f6be2a3d4e84
|
[
"MIT"
] | 1
|
2021-08-07T12:14:52.000Z
|
2021-08-07T12:14:52.000Z
|
test/engine/test_reflection.py
|
Thhhza/sqlalchemy
|
f2b267043e17b2b769dc2a5b8139f6be2a3d4e84
|
[
"MIT"
] | null | null | null |
import operator
import unicodedata
import sqlalchemy as sa
from sqlalchemy import schema, events, event, inspect
from sqlalchemy import MetaData, Integer, String
from sqlalchemy.testing import (ComparesTables, engines, AssertsCompiledSQL,
fixtures, skip)
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.util import ue
metadata, users = None, None
class ReflectionTest(fixtures.TestBase, ComparesTables):
__backend__ = True
@testing.exclude('mssql', '<', (10, 0, 0),
'Date is only supported on MSSQL 2008+')
@testing.exclude('mysql', '<', (4, 1, 1),
'early types are squirrely')
@testing.provide_metadata
def test_basic_reflection(self):
meta = self.metadata
users = Table('engine_users', meta,
Column('user_id', sa.INT, primary_key=True),
Column('user_name', sa.VARCHAR(20), nullable=False),
Column('test1', sa.CHAR(5), nullable=False),
Column('test2', sa.Float(5), nullable=False),
Column('test3', sa.Text),
Column('test4', sa.Numeric(10, 2), nullable=False),
Column('test5', sa.Date),
Column('parent_user_id', sa.Integer,
sa.ForeignKey('engine_users.user_id')),
Column('test6', sa.Date, nullable=False),
Column('test7', sa.Text),
Column('test8', sa.LargeBinary),
Column('test_passivedefault2', sa.Integer, server_default='5'),
Column('test9', sa.LargeBinary(100)),
Column('test10', sa.Numeric(10, 2)),
test_needs_fk=True,
)
addresses = Table(
'engine_email_addresses',
meta,
Column('address_id', sa.Integer, primary_key=True),
Column('remote_user_id', sa.Integer,
sa.ForeignKey(users.c.user_id)),
Column('email_address', sa.String(20)),
test_needs_fk=True,
)
meta.create_all()
meta2 = MetaData()
reflected_users = Table('engine_users', meta2,
autoload=True,
autoload_with=testing.db)
reflected_addresses = Table('engine_email_addresses',
meta2, autoload=True, autoload_with=testing.db)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.provide_metadata
def test_two_foreign_keys(self):
meta = self.metadata
Table(
't1',
meta,
Column('id', sa.Integer, primary_key=True),
Column('t2id', sa.Integer, sa.ForeignKey('t2.id')),
Column('t3id', sa.Integer, sa.ForeignKey('t3.id')),
test_needs_fk=True,
)
Table('t2', meta,
Column('id', sa.Integer, primary_key=True),
test_needs_fk=True)
Table('t3', meta,
Column('id', sa.Integer, primary_key=True),
test_needs_fk=True)
meta.create_all()
meta2 = MetaData()
t1r, t2r, t3r = [Table(x, meta2, autoload=True,
autoload_with=testing.db) for x in ('t1',
't2', 't3')]
assert t1r.c.t2id.references(t2r.c.id)
assert t1r.c.t3id.references(t3r.c.id)
def test_nonexistent(self):
meta = MetaData(testing.db)
assert_raises(sa.exc.NoSuchTableError, Table, 'nonexistent',
meta, autoload=True)
assert 'nonexistent' not in meta.tables
@testing.provide_metadata
def test_include_columns(self):
meta = self.metadata
foo = Table('foo', meta, *[Column(n, sa.String(30))
for n in ['a', 'b', 'c', 'd', 'e', 'f']])
meta.create_all()
meta2 = MetaData(testing.db)
foo = Table('foo', meta2, autoload=True,
include_columns=['b', 'f', 'e'])
# test that cols come back in original order
eq_([c.name for c in foo.c], ['b', 'e', 'f'])
for c in ('b', 'f', 'e'):
assert c in foo.c
for c in ('a', 'c', 'd'):
assert c not in foo.c
# test against a table which is already reflected
meta3 = MetaData(testing.db)
foo = Table('foo', meta3, autoload=True)
foo = Table('foo', meta3, include_columns=['b', 'f', 'e'],
extend_existing=True)
eq_([c.name for c in foo.c], ['b', 'e', 'f'])
for c in ('b', 'f', 'e'):
assert c in foo.c
for c in ('a', 'c', 'd'):
assert c not in foo.c
@testing.provide_metadata
def test_extend_existing(self):
meta = self.metadata
Table('t', meta,
Column('id', Integer, primary_key=True),
Column('x', Integer),
Column('y', Integer),
Column('z', Integer, server_default="5"),
)
meta.create_all()
m2 = MetaData()
old_z = Column('z', String, primary_key=True)
old_y = Column('y', String)
old_q = Column('q', Integer)
t2 = Table('t', m2, old_z, old_q)
eq_(t2.primary_key.columns, (t2.c.z, ))
t2 = Table('t', m2, old_y,
extend_existing=True,
autoload=True,
autoload_with=testing.db)
eq_(
set(t2.columns.keys()),
set(['x', 'y', 'z', 'q', 'id'])
)
eq_(t2.primary_key.columns, (t2.c.id, ))
assert t2.c.z is not old_z
assert t2.c.y is old_y
assert t2.c.z.type._type_affinity is Integer
assert t2.c.q is old_q
m3 = MetaData()
t3 = Table('t', m3, Column('z', Integer))
t3 = Table('t', m3, extend_existing=False,
autoload=True,
autoload_with=testing.db)
eq_(
set(t3.columns.keys()),
set(['z'])
)
m4 = MetaData()
old_z = Column('z', String, primary_key=True)
old_y = Column('y', String)
old_q = Column('q', Integer)
t4 = Table('t', m4, old_z, old_q)
eq_(t4.primary_key.columns, (t4.c.z, ))
t4 = Table('t', m4, old_y,
extend_existing=True,
autoload=True,
autoload_replace=False,
autoload_with=testing.db)
eq_(
set(t4.columns.keys()),
set(['x', 'y', 'z', 'q', 'id'])
)
eq_(t4.primary_key.columns, (t4.c.id, ))
assert t4.c.z is old_z
assert t4.c.y is old_y
assert t4.c.z.type._type_affinity is String
assert t4.c.q is old_q
@testing.emits_warning(r".*omitted columns")
@testing.provide_metadata
def test_include_columns_indexes(self):
m = self.metadata
t1 = Table('t1', m, Column('a', sa.Integer), Column('b', sa.Integer))
sa.Index('foobar', t1.c.a, t1.c.b)
sa.Index('bat', t1.c.a)
m.create_all()
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True)
assert len(t2.indexes) == 2
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True, include_columns=['a'])
assert len(t2.indexes) == 1
m2 = MetaData(testing.db)
t2 = Table('t1', m2, autoload=True, include_columns=['a', 'b'])
assert len(t2.indexes) == 2
@testing.provide_metadata
def test_autoload_replace_foreign_key_nonpresent(self):
"""test autoload_replace=False with col plus FK
establishes the FK not present in the DB.
"""
Table('a', self.metadata, Column('id', Integer, primary_key=True))
Table('b', self.metadata, Column('id', Integer, primary_key=True),
Column('a_id', Integer))
self.metadata.create_all()
m2 = MetaData()
b2 = Table('b', m2, Column('a_id', Integer, sa.ForeignKey('a.id')))
a2 = Table('a', m2, autoload=True, autoload_with=testing.db)
b2 = Table('b', m2, extend_existing=True, autoload=True,
autoload_with=testing.db,
autoload_replace=False)
assert b2.c.id is not None
assert b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 2)
@testing.provide_metadata
def test_autoload_replace_foreign_key_ispresent(self):
"""test autoload_replace=False with col plus FK mirroring
DB-reflected FK skips the reflected FK and installs
the in-python one only.
"""
Table('a', self.metadata, Column('id', Integer, primary_key=True))
Table('b', self.metadata, Column('id', Integer, primary_key=True),
Column('a_id', Integer, sa.ForeignKey('a.id')))
self.metadata.create_all()
m2 = MetaData()
b2 = Table('b', m2, Column('a_id', Integer, sa.ForeignKey('a.id')))
a2 = Table('a', m2, autoload=True, autoload_with=testing.db)
b2 = Table('b', m2, extend_existing=True, autoload=True,
autoload_with=testing.db,
autoload_replace=False)
assert b2.c.id is not None
assert b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 2)
@testing.provide_metadata
def test_autoload_replace_foreign_key_removed(self):
"""test autoload_replace=False with col minus FK that's in the
DB means the FK is skipped and doesn't get installed at all.
"""
Table('a', self.metadata, Column('id', Integer, primary_key=True))
Table('b', self.metadata, Column('id', Integer, primary_key=True),
Column('a_id', Integer, sa.ForeignKey('a.id')))
self.metadata.create_all()
m2 = MetaData()
b2 = Table('b', m2, Column('a_id', Integer))
a2 = Table('a', m2, autoload=True, autoload_with=testing.db)
b2 = Table('b', m2, extend_existing=True, autoload=True,
autoload_with=testing.db,
autoload_replace=False)
assert b2.c.id is not None
assert not b2.c.a_id.references(a2.c.id)
eq_(len(b2.constraints), 1)
@testing.provide_metadata
def test_autoload_replace_primary_key(self):
Table('a', self.metadata, Column('id', Integer))
self.metadata.create_all()
m2 = MetaData()
a2 = Table('a', m2, Column('id', Integer, primary_key=True))
Table('a', m2, autoload=True, autoload_with=testing.db,
autoload_replace=False, extend_existing=True)
eq_(list(a2.primary_key), [a2.c.id])
def test_autoload_replace_arg(self):
Table('t', MetaData(), autoload_replace=False)
@testing.provide_metadata
def test_autoincrement_col(self):
"""test that 'autoincrement' is reflected according to sqla's policy.
        Don't mark this test as unsupported for any backend!
(technically it fails with MySQL InnoDB since "id" comes before "id2")
"""
meta = self.metadata
Table('test', meta,
Column('id', sa.Integer, primary_key=True),
Column('data', sa.String(50)),
mysql_engine='MyISAM'
)
Table('test2', meta,
Column('id', sa.Integer, sa.ForeignKey('test.id'),
primary_key=True),
Column('id2', sa.Integer, primary_key=True),
Column('data', sa.String(50)),
mysql_engine='MyISAM'
)
meta.create_all()
m2 = MetaData(testing.db)
t1a = Table('test', m2, autoload=True)
assert t1a._autoincrement_column is t1a.c.id
t2a = Table('test2', m2, autoload=True)
assert t2a._autoincrement_column is t2a.c.id2
@skip('sqlite')
@testing.provide_metadata
def test_unknown_types(self):
"""Test the handling of unknown types for the given dialect.
sqlite is skipped because it has special rules for unknown types using
'affinity types' - this feature is tested in that dialect's test spec.
"""
meta = self.metadata
t = Table("test", meta,
Column('foo', sa.DateTime))
ischema_names = testing.db.dialect.ischema_names
t.create()
testing.db.dialect.ischema_names = {}
try:
m2 = MetaData(testing.db)
assert_raises(sa.exc.SAWarning, Table, "test", m2, autoload=True)
@testing.emits_warning('Did not recognize type')
def warns():
m3 = MetaData(testing.db)
t3 = Table("test", m3, autoload=True)
assert t3.c.foo.type.__class__ == sa.types.NullType
finally:
testing.db.dialect.ischema_names = ischema_names
@testing.provide_metadata
def test_basic_override(self):
meta = self.metadata
table = Table(
'override_test', meta,
Column('col1', sa.Integer, primary_key=True),
Column('col2', sa.String(20)),
Column('col3', sa.Numeric)
)
table.create()
meta2 = MetaData(testing.db)
table = Table(
'override_test', meta2,
Column('col2', sa.Unicode()),
Column('col4', sa.String(30)), autoload=True)
self.assert_(isinstance(table.c.col1.type, sa.Integer))
self.assert_(isinstance(table.c.col2.type, sa.Unicode))
self.assert_(isinstance(table.c.col4.type, sa.String))
@testing.provide_metadata
def test_override_upgrade_pk_flag(self):
meta = self.metadata
table = Table(
'override_test', meta,
Column('col1', sa.Integer),
Column('col2', sa.String(20)),
Column('col3', sa.Numeric)
)
table.create()
meta2 = MetaData(testing.db)
table = Table(
'override_test', meta2,
Column('col1', sa.Integer, primary_key=True),
autoload=True)
eq_(list(table.primary_key), [table.c.col1])
eq_(table.c.col1.primary_key, True)
@testing.provide_metadata
def test_override_pkfk(self):
"""test that you can override columns which contain foreign keys
to other reflected tables, where the foreign key column is also
a primary key column"""
meta = self.metadata
Table('users', meta,
Column('id', sa.Integer, primary_key=True),
Column('name', sa.String(30)))
Table('addresses', meta,
Column('id', sa.Integer, primary_key=True),
Column('street', sa.String(30)))
meta.create_all()
meta2 = MetaData(testing.db)
a2 = Table('addresses', meta2,
Column('id', sa.Integer,
sa.ForeignKey('users.id'), primary_key=True),
autoload=True)
u2 = Table('users', meta2, autoload=True)
assert list(a2.primary_key) == [a2.c.id]
assert list(u2.primary_key) == [u2.c.id]
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.id)
meta3 = MetaData(testing.db)
u3 = Table('users', meta3, autoload=True)
a3 = Table('addresses', meta3,
Column('id', sa.Integer, sa.ForeignKey('users.id'),
primary_key=True),
autoload=True)
assert list(a3.primary_key) == [a3.c.id]
assert list(u3.primary_key) == [u3.c.id]
assert u3.join(a3).onclause.compare(u3.c.id == a3.c.id)
@testing.provide_metadata
def test_override_nonexistent_fk(self):
"""test that you can override columns and create new foreign
keys to other reflected tables which have no foreign keys. this
is common with MySQL MyISAM tables."""
meta = self.metadata
Table('users', meta,
Column('id', sa.Integer, primary_key=True),
Column('name', sa.String(30)))
Table('addresses', meta,
Column('id', sa.Integer, primary_key=True),
Column('street', sa.String(30)),
Column('user_id', sa.Integer))
meta.create_all()
meta2 = MetaData(testing.db)
a2 = Table('addresses', meta2,
Column('user_id', sa.Integer, sa.ForeignKey('users.id')),
autoload=True)
u2 = Table('users', meta2, autoload=True)
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.foreign_keys) == 1
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] \
== [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent \
is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
meta3 = MetaData(testing.db)
u3 = Table('users', meta3, autoload=True)
a3 = Table('addresses', meta3, Column('user_id',
sa.Integer, sa.ForeignKey('users.id')),
autoload=True)
assert u3.join(a3).onclause.compare(u3.c.id == a3.c.user_id)
meta4 = MetaData(testing.db)
u4 = Table('users', meta4,
Column('id', sa.Integer, key='u_id', primary_key=True),
autoload=True)
a4 = Table(
'addresses',
meta4,
Column('id', sa.Integer, key='street',
primary_key=True),
Column('street', sa.String(30), key='user_id'),
Column('user_id', sa.Integer, sa.ForeignKey('users.u_id'
), key='id'),
autoload=True,
)
assert u4.join(a4).onclause.compare(u4.c.u_id == a4.c.id)
assert list(u4.primary_key) == [u4.c.u_id]
assert len(u4.columns) == 2
assert len(u4.constraints) == 1
assert len(a4.columns) == 3
assert len(a4.constraints) == 2
@testing.provide_metadata
def test_override_composite_fk(self):
"""Test double-remove of composite foreign key, when replaced."""
metadata = self.metadata
Table('a',
metadata,
Column('x', sa.Integer, primary_key=True),
Column('y', sa.Integer, primary_key=True),
)
Table('b',
metadata,
Column('x', sa.Integer, primary_key=True),
Column('y', sa.Integer, primary_key=True),
sa.ForeignKeyConstraint(['x', 'y'], ['a.x', 'a.y'])
)
metadata.create_all()
meta2 = MetaData()
c1 = Column('x', sa.Integer, primary_key=True)
c2 = Column('y', sa.Integer, primary_key=True)
f1 = sa.ForeignKeyConstraint(['x', 'y'], ['a.x', 'a.y'])
b1 = Table('b',
meta2, c1, c2, f1,
autoload=True,
autoload_with=testing.db
)
assert b1.c.x is c1
assert b1.c.y is c2
assert f1 in b1.constraints
assert len(b1.constraints) == 2
@testing.provide_metadata
def test_override_keys(self):
"""test that columns can be overridden with a 'key',
and that ForeignKey targeting during reflection still works."""
meta = self.metadata
Table('a', meta,
Column('x', sa.Integer, primary_key=True),
Column('z', sa.Integer),
test_needs_fk=True
)
Table('b', meta,
Column('y', sa.Integer, sa.ForeignKey('a.x')),
test_needs_fk=True
)
meta.create_all()
m2 = MetaData(testing.db)
a2 = Table('a', m2,
Column('x', sa.Integer, primary_key=True, key='x1'),
autoload=True)
b2 = Table('b', m2, autoload=True)
assert a2.join(b2).onclause.compare(a2.c.x1 == b2.c.y)
assert b2.c.y.references(a2.c.x1)
@testing.provide_metadata
def test_nonreflected_fk_raises(self):
"""test that a NoReferencedColumnError is raised when reflecting
a table with an FK to another table which has not included the target
column in its reflection.
"""
meta = self.metadata
Table('a', meta,
Column('x', sa.Integer, primary_key=True),
Column('z', sa.Integer),
test_needs_fk=True
)
Table('b', meta,
Column('y', sa.Integer, sa.ForeignKey('a.x')),
test_needs_fk=True
)
meta.create_all()
m2 = MetaData(testing.db)
a2 = Table('a', m2, include_columns=['z'], autoload=True)
b2 = Table('b', m2, autoload=True)
assert_raises(sa.exc.NoReferencedColumnError, a2.join, b2)
@testing.exclude('mysql', '<', (4, 1, 1), 'innodb funkiness')
@testing.provide_metadata
def test_override_existing_fk(self):
"""test that you can override columns and specify new foreign
keys to other reflected tables, on columns which *do* already
have that foreign key, and that the FK is not duped. """
meta = self.metadata
Table('users', meta,
Column('id', sa.Integer, primary_key=True),
Column('name', sa.String(30)),
test_needs_fk=True)
Table('addresses', meta,
Column('id', sa.Integer, primary_key=True),
Column('user_id', sa.Integer, sa.ForeignKey('users.id')),
test_needs_fk=True)
meta.create_all()
meta2 = MetaData(testing.db)
a2 = Table('addresses', meta2,
Column('user_id', sa.Integer, sa.ForeignKey('users.id')),
autoload=True)
u2 = Table('users', meta2, autoload=True)
s = sa.select([a2])
assert s.c.user_id is not None
assert len(a2.foreign_keys) == 1
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.constraints) == 2
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] \
== [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent \
is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
meta2 = MetaData(testing.db)
u2 = Table('users', meta2, Column('id', sa.Integer,
primary_key=True), autoload=True)
a2 = Table('addresses', meta2, Column('id', sa.Integer,
primary_key=True), Column('user_id', sa.Integer,
sa.ForeignKey('users.id')), autoload=True)
s = sa.select([a2])
assert s.c.user_id is not None
assert len(a2.foreign_keys) == 1
assert len(a2.c.user_id.foreign_keys) == 1
assert len(a2.constraints) == 2
assert [c.parent for c in a2.foreign_keys] == [a2.c.user_id]
assert [c.parent for c in a2.c.user_id.foreign_keys] \
== [a2.c.user_id]
assert list(a2.c.user_id.foreign_keys)[0].parent \
is a2.c.user_id
assert u2.join(a2).onclause.compare(u2.c.id == a2.c.user_id)
@testing.only_on(['postgresql', 'mysql'])
@testing.provide_metadata
def test_fk_options(self):
"""test that foreign key reflection includes options (on
backends with {dialect}.get_foreign_keys() support)"""
if testing.against('postgresql'):
test_attrs = ('match', 'onupdate', 'ondelete', 'deferrable', 'initially')
addresses_user_id_fkey = sa.ForeignKey(
# Each option is specifically not a Postgres default, or
# it won't be returned by PG's inspection
'users.id',
                name='addresses_user_id_fkey',
match='FULL',
onupdate='RESTRICT',
ondelete='RESTRICT',
deferrable=True,
initially='DEFERRED'
)
elif testing.against('mysql'):
# MATCH, DEFERRABLE, and INITIALLY cannot be defined for MySQL
# ON UPDATE and ON DELETE have defaults of RESTRICT, which are
# elided by MySQL's inspection
addresses_user_id_fkey = sa.ForeignKey(
'users.id',
                name='addresses_user_id_fkey',
onupdate='CASCADE',
ondelete='CASCADE'
)
test_attrs = ('onupdate', 'ondelete')
meta = self.metadata
Table('users', meta,
Column('id', sa.Integer, primary_key=True),
Column('name', sa.String(30)),
test_needs_fk=True)
Table('addresses', meta,
Column('id', sa.Integer, primary_key=True),
Column('user_id', sa.Integer, addresses_user_id_fkey),
test_needs_fk=True)
meta.create_all()
meta2 = MetaData()
meta2.reflect(testing.db)
for fk in meta2.tables['addresses'].foreign_keys:
ref = addresses_user_id_fkey
for attr in test_attrs:
eq_(getattr(fk, attr), getattr(ref, attr))
def test_pks_not_uniques(self):
"""test that primary key reflection not tripped up by unique
indexes"""
testing.db.execute("""
CREATE TABLE book (
id INTEGER NOT NULL,
title VARCHAR(100) NOT NULL,
series INTEGER,
series_id INTEGER,
UNIQUE(series, series_id),
PRIMARY KEY(id)
)""")
try:
metadata = MetaData(bind=testing.db)
book = Table('book', metadata, autoload=True)
assert book.primary_key.contains_column(book.c.id)
assert not book.primary_key.contains_column(book.c.series)
assert len(book.primary_key) == 1
finally:
testing.db.execute("drop table book")
def test_fk_error(self):
metadata = MetaData(testing.db)
Table('slots', metadata,
Column('slot_id', sa.Integer, primary_key=True),
Column('pkg_id', sa.Integer, sa.ForeignKey('pkgs.pkg_id')),
Column('slot', sa.String(128)),
)
assert_raises_message(sa.exc.InvalidRequestError,
"Foreign key associated with column 'slots.pkg_id' "
"could not find table 'pkgs' with which to generate "
"a foreign key to target column 'pkg_id'",
metadata.create_all)
def test_composite_pks(self):
"""test reflection of a composite primary key"""
testing.db.execute("""
CREATE TABLE book (
id INTEGER NOT NULL,
isbn VARCHAR(50) NOT NULL,
title VARCHAR(100) NOT NULL,
series INTEGER NOT NULL,
series_id INTEGER NOT NULL,
UNIQUE(series, series_id),
PRIMARY KEY(id, isbn)
)""")
try:
metadata = MetaData(bind=testing.db)
book = Table('book', metadata, autoload=True)
assert book.primary_key.contains_column(book.c.id)
assert book.primary_key.contains_column(book.c.isbn)
assert not book.primary_key.contains_column(book.c.series)
assert len(book.primary_key) == 2
finally:
testing.db.execute("drop table book")
@testing.exclude('mysql', '<', (4, 1, 1), 'innodb funkiness')
@testing.provide_metadata
def test_composite_fk(self):
"""test reflection of composite foreign keys"""
meta = self.metadata
multi = Table(
'multi', meta,
Column('multi_id', sa.Integer, primary_key=True),
Column('multi_rev', sa.Integer, primary_key=True),
Column('multi_hoho', sa.Integer, primary_key=True),
Column('name', sa.String(50), nullable=False),
Column('val', sa.String(100)),
test_needs_fk=True,
)
multi2 = Table('multi2', meta,
Column('id', sa.Integer, primary_key=True),
Column('foo', sa.Integer),
Column('bar', sa.Integer),
Column('lala', sa.Integer),
Column('data', sa.String(50)),
sa.ForeignKeyConstraint(['foo', 'bar', 'lala'],
['multi.multi_id', 'multi.multi_rev', 'multi.multi_hoho'
]),
test_needs_fk=True,
)
meta.create_all()
meta2 = MetaData()
table = Table('multi', meta2, autoload=True,
autoload_with=testing.db)
table2 = Table('multi2', meta2, autoload=True,
autoload_with=testing.db)
self.assert_tables_equal(multi, table)
self.assert_tables_equal(multi2, table2)
j = sa.join(table, table2)
self.assert_(sa.and_(table.c.multi_id == table2.c.foo,
table.c.multi_rev == table2.c.bar,
table.c.multi_hoho
== table2.c.lala).compare(j.onclause))
@testing.crashes('oracle', 'FIXME: unknown, confirm not fails_on')
@testing.provide_metadata
def test_reserved(self):
# check a table that uses an SQL reserved name doesn't cause an
# error
meta = self.metadata
table_a = Table('select', meta, Column('not', sa.Integer,
primary_key=True), Column('from',
sa.String(12), nullable=False),
sa.UniqueConstraint('from', name='when'))
sa.Index('where', table_a.c['from'])
# There's currently no way to calculate identifier case
# normalization in isolation, so...
if testing.against('firebird', 'oracle'):
check_col = 'TRUE'
else:
check_col = 'true'
quoter = meta.bind.dialect.identifier_preparer.quote_identifier
Table('false', meta,
Column('create', sa.Integer, primary_key=True),
Column('true', sa.Integer, sa.ForeignKey('select.not')),
sa.CheckConstraint('%s <> 1'
% quoter(check_col), name='limit')
)
table_c = Table('is', meta,
Column('or', sa.Integer, nullable=False, primary_key=True),
Column('join', sa.Integer, nullable=False, primary_key=True),
sa.PrimaryKeyConstraint('or', 'join', name='to')
)
index_c = sa.Index('else', table_c.c.join)
meta.create_all()
index_c.drop()
meta2 = MetaData(testing.db)
Table('select', meta2, autoload=True)
Table('false', meta2, autoload=True)
Table('is', meta2, autoload=True)
@testing.provide_metadata
def _test_reflect_uses_bind(self, fn):
from sqlalchemy.pool import AssertionPool
e = engines.testing_engine(options={"poolclass": AssertionPool})
fn(e)
@testing.uses_deprecated()
def test_reflect_uses_bind_constructor_conn(self):
self._test_reflect_uses_bind(lambda e: MetaData(e.connect(),
reflect=True))
@testing.uses_deprecated()
def test_reflect_uses_bind_constructor_engine(self):
self._test_reflect_uses_bind(lambda e: MetaData(e, reflect=True))
def test_reflect_uses_bind_constructor_conn_reflect(self):
self._test_reflect_uses_bind(lambda e: MetaData(e.connect()).reflect())
def test_reflect_uses_bind_constructor_engine_reflect(self):
self._test_reflect_uses_bind(lambda e: MetaData(e).reflect())
def test_reflect_uses_bind_conn_reflect(self):
self._test_reflect_uses_bind(lambda e: MetaData().reflect(e.connect()))
def test_reflect_uses_bind_engine_reflect(self):
self._test_reflect_uses_bind(lambda e: MetaData().reflect(e))
@testing.provide_metadata
def test_reflect_all(self):
existing = testing.db.table_names()
names = ['rt_%s' % name for name in ('a', 'b', 'c', 'd', 'e')]
nameset = set(names)
for name in names:
# be sure our starting environment is sane
self.assert_(name not in existing)
self.assert_('rt_f' not in existing)
baseline = self.metadata
for name in names:
Table(name, baseline, Column('id', sa.Integer, primary_key=True))
baseline.create_all()
m1 = MetaData(testing.db)
self.assert_(not m1.tables)
m1.reflect()
self.assert_(nameset.issubset(set(m1.tables.keys())))
m2 = MetaData()
m2.reflect(testing.db, only=['rt_a', 'rt_b'])
self.assert_(set(m2.tables.keys()) == set(['rt_a', 'rt_b']))
m3 = MetaData()
c = testing.db.connect()
m3.reflect(bind=c, only=lambda name, meta: name == 'rt_c')
self.assert_(set(m3.tables.keys()) == set(['rt_c']))
m4 = MetaData(testing.db)
try:
m4.reflect(only=['rt_a', 'rt_f'])
self.assert_(False)
except sa.exc.InvalidRequestError as e:
self.assert_(e.args[0].endswith('(rt_f)'))
m5 = MetaData(testing.db)
m5.reflect(only=[])
self.assert_(not m5.tables)
m6 = MetaData(testing.db)
m6.reflect(only=lambda n, m: False)
self.assert_(not m6.tables)
m7 = MetaData(testing.db)
m7.reflect()
self.assert_(nameset.issubset(set(m7.tables.keys())))
m8 = MetaData()
assert_raises(
sa.exc.UnboundExecutionError,
m8.reflect
)
m8_e1 = MetaData(testing.db)
rt_c = Table('rt_c', m8_e1)
m8_e1.reflect(extend_existing=True)
eq_(set(m8_e1.tables.keys()), set(names))
eq_(rt_c.c.keys(), ['id'])
m8_e2 = MetaData(testing.db)
rt_c = Table('rt_c', m8_e2)
m8_e2.reflect(extend_existing=True, only=['rt_a', 'rt_c'])
eq_(set(m8_e2.tables.keys()), set(['rt_a', 'rt_c']))
eq_(rt_c.c.keys(), ['id'])
if existing:
print("Other tables present in database, skipping some checks.")
else:
baseline.drop_all()
m9 = MetaData(testing.db)
m9.reflect()
self.assert_(not m9.tables)
def test_reflect_all_conn_closing(self):
m1 = MetaData()
c = testing.db.connect()
m1.reflect(bind=c)
assert not c.closed
def test_inspector_conn_closing(self):
c = testing.db.connect()
inspect(c)
assert not c.closed
@testing.provide_metadata
def test_index_reflection(self):
m1 = self.metadata
t1 = Table('party', m1,
Column('id', sa.Integer, nullable=False),
Column('name', sa.String(20), index=True)
)
sa.Index('idx1', t1.c.id, unique=True)
sa.Index('idx2', t1.c.name, t1.c.id, unique=False)
m1.create_all()
m2 = MetaData(testing.db)
t2 = Table('party', m2, autoload=True)
assert len(t2.indexes) == 3
# Make sure indexes are in the order we expect them in
tmp = [(idx.name, idx) for idx in t2.indexes]
tmp.sort()
r1, r2, r3 = [idx[1] for idx in tmp]
assert r1.name == 'idx1'
assert r2.name == 'idx2'
assert r1.unique == True
assert r2.unique == False
assert r3.unique == False
assert set([t2.c.id]) == set(r1.columns)
assert set([t2.c.name, t2.c.id]) == set(r2.columns)
assert set([t2.c.name]) == set(r3.columns)
@testing.requires.views
@testing.provide_metadata
def test_views(self):
metadata = self.metadata
users, addresses, dingalings = createTables(metadata)
try:
metadata.create_all()
_create_views(metadata.bind, None)
m2 = MetaData(testing.db)
users_v = Table("users_v", m2, autoload=True)
addresses_v = Table("email_addresses_v", m2, autoload=True)
for c1, c2 in zip(users_v.c, users.c):
eq_(c1.name, c2.name)
self.assert_types_base(c1, c2)
for c1, c2 in zip(addresses_v.c, addresses.c):
eq_(c1.name, c2.name)
self.assert_types_base(c1, c2)
finally:
_drop_views(metadata.bind)
@testing.requires.views
@testing.provide_metadata
def test_reflect_all_with_views(self):
metadata = self.metadata
users, addresses, dingalings = createTables(metadata, None)
try:
metadata.create_all()
_create_views(metadata.bind, None)
m2 = MetaData(testing.db)
m2.reflect(views=False)
eq_(
set(m2.tables),
set(['users', 'email_addresses', 'dingalings'])
)
m2 = MetaData(testing.db)
m2.reflect(views=True)
eq_(
set(m2.tables),
set(['email_addresses_v', 'users_v',
'users', 'dingalings', 'email_addresses'])
)
finally:
_drop_views(metadata.bind)
class CreateDropTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, users
metadata = MetaData()
users = Table('users', metadata,
Column('user_id', sa.Integer,
sa.Sequence('user_id_seq', optional=True),
primary_key=True),
Column('user_name', sa.String(40)))
Table('email_addresses', metadata,
Column('address_id', sa.Integer,
sa.Sequence('address_id_seq', optional=True),
primary_key=True),
Column('user_id',
sa.Integer, sa.ForeignKey(users.c.user_id)),
Column('email_address', sa.String(40)))
Table(
'orders',
metadata,
Column('order_id', sa.Integer, sa.Sequence('order_id_seq',
optional=True), primary_key=True),
Column('user_id', sa.Integer,
sa.ForeignKey(users.c.user_id)),
Column('description', sa.String(50)),
Column('isopen', sa.Integer),
)
Table('items', metadata,
Column('item_id', sa.INT,
sa.Sequence('items_id_seq', optional=True),
primary_key=True),
Column('order_id',
sa.INT, sa.ForeignKey('orders')),
Column('item_name', sa.VARCHAR(50)))
def test_sorter(self):
tables = metadata.sorted_tables
table_names = [t.name for t in tables]
ua = [n for n in table_names if n in ('users', 'email_addresses')]
oi = [n for n in table_names if n in ('orders', 'items')]
eq_(ua, ['users', 'email_addresses'])
eq_(oi, ['orders', 'items'])
def testcheckfirst(self):
try:
assert not users.exists(testing.db)
users.create(bind=testing.db)
assert users.exists(testing.db)
users.create(bind=testing.db, checkfirst=True)
users.drop(bind=testing.db)
users.drop(bind=testing.db, checkfirst=True)
assert not users.exists(bind=testing.db)
users.create(bind=testing.db, checkfirst=True)
users.drop(bind=testing.db)
finally:
metadata.drop_all(bind=testing.db)
def test_createdrop(self):
metadata.create_all(bind=testing.db)
eq_(testing.db.has_table('items'), True)
eq_(testing.db.has_table('email_addresses'), True)
metadata.create_all(bind=testing.db)
eq_(testing.db.has_table('items'), True)
metadata.drop_all(bind=testing.db)
eq_(testing.db.has_table('items'), False)
eq_(testing.db.has_table('email_addresses'), False)
metadata.drop_all(bind=testing.db)
eq_(testing.db.has_table('items'), False)
def test_tablenames(self):
metadata.create_all(bind=testing.db)
# we only check to see if all the explicitly created tables are
# there, rather than assertEqual -- the test db could have
# "extra" tables if there is a misconfigured template. (*cough*
# tsearch2 w/ the pg windows installer.)
self.assert_(not set(metadata.tables)
- set(testing.db.table_names()))
metadata.drop_all(bind=testing.db)
class SchemaManipulationTest(fixtures.TestBase):
__backend__ = True
def test_append_constraint_unique(self):
meta = MetaData()
users = Table('users', meta, Column('id', sa.Integer))
addresses = Table('addresses', meta,
Column('id', sa.Integer),
Column('user_id', sa.Integer))
fk = sa.ForeignKeyConstraint(['user_id'], [users.c.id])
addresses.append_constraint(fk)
addresses.append_constraint(fk)
assert len(addresses.c.user_id.foreign_keys) == 1
assert addresses.constraints == set([addresses.primary_key, fk])
class UnicodeReflectionTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
cls.metadata = metadata = MetaData()
no_multibyte_period = set([
('plain', 'col_plain', 'ix_plain')
])
no_has_table = [
('no_has_table_1', ue('col_Unit\u00e9ble'), ue('ix_Unit\u00e9ble')),
('no_has_table_2', ue('col_\u6e2c\u8a66'), ue('ix_\u6e2c\u8a66')),
]
no_case_sensitivity = [
(ue('\u6e2c\u8a66'), ue('col_\u6e2c\u8a66'), ue('ix_\u6e2c\u8a66')),
(ue('unit\u00e9ble'), ue('col_unit\u00e9ble'), ue('ix_unit\u00e9ble')),
]
full = [
(ue('Unit\u00e9ble'), ue('col_Unit\u00e9ble'), ue('ix_Unit\u00e9ble')),
(ue('\u6e2c\u8a66'), ue('col_\u6e2c\u8a66'), ue('ix_\u6e2c\u8a66')),
]
# as you can see, our options for this kind of thing
# are really limited unless you're on PG or SQLite
# forget about it on these backends
if not testing.requires.unicode_ddl.enabled:
names = no_multibyte_period
# mysql can't handle casing usually
elif testing.against("mysql") and \
not testing.requires.mysql_fully_case_sensitive.enabled:
names = no_multibyte_period.union(no_case_sensitivity)
# mssql + pyodbc + freetds can't compare multibyte names to
# information_schema.tables.table_name
elif testing.against("mssql"):
names = no_multibyte_period.union(no_has_table)
else:
names = no_multibyte_period.union(full)
for tname, cname, ixname in names:
t = Table(tname, metadata,
Column('id', sa.Integer,
sa.Sequence(cname + '_id_seq'),
primary_key=True),
Column(cname, Integer)
)
schema.Index(ixname, t.c[cname])
metadata.create_all(testing.db)
cls.names = names
@classmethod
def teardown_class(cls):
cls.metadata.drop_all(testing.db, checkfirst=False)
@testing.requires.unicode_connections
def test_has_table(self):
for tname, cname, ixname in self.names:
assert testing.db.has_table(tname), "Can't detect name %s" % tname
@testing.requires.unicode_connections
def test_basic(self):
# the 'convert_unicode' should not get in the way of the
# reflection process. reflecttable for oracle, postgresql
# (others?) expect non-unicode strings in result sets/bind
# params
bind = testing.db
names = set([rec[0] for rec in self.names])
reflected = set(bind.table_names())
# Jython 2.5 on Java 5 lacks unicodedata.normalize
if not names.issubset(reflected) and hasattr(unicodedata, 'normalize'):
# Python source files in the utf-8 coding seem to
# normalize literals as NFC (and the above are
# explicitly NFC). Maybe this database normalizes NFD
# on reflection.
nfc = set([unicodedata.normalize('NFC', n) for n in names])
self.assert_(nfc == names)
# Yep. But still ensure that bulk reflection and
# create/drop work with either normalization.
r = MetaData(bind)
r.reflect()
r.drop_all(checkfirst=False)
r.create_all(checkfirst=False)
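    # An illustrative aside on the NFC comment above (values are only examples):
    # NFC composes a base letter and a combining accent into a single code
    # point, which is why the reflected names can still compare equal, e.g.
    #   unicodedata.normalize('NFC', u'unite\u0301ble') == ue(u'unit\u00e9ble')  # True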
@testing.requires.unicode_connections
def test_get_names(self):
inspector = inspect(testing.db)
names = dict(
(tname, (cname, ixname)) for tname, cname, ixname in self.names
)
for tname in inspector.get_table_names():
assert tname in names
eq_(
[
(rec['name'], rec['column_names'][0])
for rec in inspector.get_indexes(tname)
],
[(names[tname][1], names[tname][0])]
)
class SchemaTest(fixtures.TestBase):
__backend__ = True
@testing.requires.schemas
@testing.requires.cross_schema_fk_reflection
def test_has_schema(self):
eq_(testing.db.dialect.has_schema(testing.db, 'test_schema'), True)
eq_(testing.db.dialect.has_schema(testing.db, 'sa_fake_schema_123'), False)
@testing.requires.schemas
@testing.fails_on('sqlite', 'FIXME: unknown')
@testing.fails_on('sybase', 'FIXME: unknown')
def test_explicit_default_schema(self):
engine = testing.db
engine.connect().close()
if testing.against('sqlite'):
# Works for CREATE TABLE main.foo, SELECT FROM main.foo, etc.,
# but fails on:
# FOREIGN KEY(col2) REFERENCES main.table1 (col1)
schema = 'main'
else:
schema = engine.dialect.default_schema_name
assert bool(schema)
metadata = MetaData(engine)
Table('table1', metadata,
Column('col1', sa.Integer, primary_key=True),
test_needs_fk=True,
schema=schema)
Table('table2', metadata,
Column('col1', sa.Integer, primary_key=True),
Column('col2', sa.Integer,
sa.ForeignKey('%s.table1.col1' % schema)),
test_needs_fk=True,
schema=schema)
try:
metadata.create_all()
metadata.create_all(checkfirst=True)
assert len(metadata.tables) == 2
metadata.clear()
Table('table1', metadata, autoload=True, schema=schema)
Table('table2', metadata, autoload=True, schema=schema)
assert len(metadata.tables) == 2
finally:
metadata.drop_all()
@testing.requires.schemas
@testing.fails_on('sybase', 'FIXME: unknown')
def test_explicit_default_schema_metadata(self):
engine = testing.db
if testing.against('sqlite'):
# Works for CREATE TABLE main.foo, SELECT FROM main.foo, etc.,
# but fails on:
# FOREIGN KEY(col2) REFERENCES main.table1 (col1)
schema = 'main'
else:
schema = engine.dialect.default_schema_name
assert bool(schema)
metadata = MetaData(engine, schema=schema)
Table('table1', metadata,
Column('col1', sa.Integer, primary_key=True),
test_needs_fk=True)
Table('table2', metadata,
Column('col1', sa.Integer, primary_key=True),
Column('col2', sa.Integer,
sa.ForeignKey('table1.col1')),
test_needs_fk=True)
try:
metadata.create_all()
metadata.create_all(checkfirst=True)
assert len(metadata.tables) == 2
metadata.clear()
Table('table1', metadata, autoload=True)
Table('table2', metadata, autoload=True)
assert len(metadata.tables) == 2
finally:
metadata.drop_all()
@testing.requires.schemas
@testing.provide_metadata
def test_metadata_reflect_schema(self):
metadata = self.metadata
createTables(metadata, "test_schema")
metadata.create_all()
m2 = MetaData(schema="test_schema", bind=testing.db)
m2.reflect()
eq_(
set(m2.tables),
set(['test_schema.dingalings', 'test_schema.users',
'test_schema.email_addresses'])
)
@testing.requires.schemas
@testing.requires.cross_schema_fk_reflection
@testing.provide_metadata
def test_reflect_all_schemas_default_overlap(self):
t1 = Table('t', self.metadata,
Column('id', Integer, primary_key=True))
t2 = Table('t', self.metadata,
Column('id1', sa.ForeignKey('t.id')),
schema="test_schema"
)
self.metadata.create_all()
m2 = MetaData()
m2.reflect(testing.db, schema="test_schema")
m3 = MetaData()
m3.reflect(testing.db)
m3.reflect(testing.db, schema="test_schema")
eq_(
set((t.name, t.schema) for t in m2.tables.values()),
set((t.name, t.schema) for t in m3.tables.values())
)
# Tests related to engine.reflection
def createTables(meta, schema=None):
if schema:
schema_prefix = schema + "."
else:
schema_prefix = ""
users = Table('users', meta,
Column('user_id', sa.INT, primary_key=True),
Column('user_name', sa.VARCHAR(20), nullable=False),
Column('test1', sa.CHAR(5), nullable=False),
Column('test2', sa.Float(5), nullable=False),
Column('test3', sa.Text),
Column('test4', sa.Numeric(10, 2), nullable=False),
Column('test5', sa.Date),
Column('test5_1', sa.TIMESTAMP),
Column('parent_user_id', sa.Integer,
sa.ForeignKey('%susers.user_id' % schema_prefix)),
Column('test6', sa.Date, nullable=False),
Column('test7', sa.Text),
Column('test8', sa.LargeBinary),
Column('test_passivedefault2', sa.Integer, server_default='5'),
Column('test9', sa.LargeBinary(100)),
Column('test10', sa.Numeric(10, 2)),
schema=schema,
test_needs_fk=True,
)
dingalings = Table("dingalings", meta,
Column('dingaling_id', sa.Integer, primary_key=True),
Column('address_id', sa.Integer,
sa.ForeignKey('%semail_addresses.address_id' % schema_prefix)),
Column('data', sa.String(30)),
schema=schema, test_needs_fk=True,
)
addresses = Table('email_addresses', meta,
Column('address_id', sa.Integer),
Column('remote_user_id', sa.Integer,
sa.ForeignKey(users.c.user_id)),
Column('email_address', sa.String(20)),
sa.PrimaryKeyConstraint('address_id', name='email_ad_pk'),
schema=schema,
test_needs_fk=True,
)
return (users, addresses, dingalings)
def createIndexes(con, schema=None):
fullname = 'users'
if schema:
fullname = "%s.%s" % (schema, 'users')
query = "CREATE INDEX users_t_idx ON %s (test1, test2)" % fullname
con.execute(sa.sql.text(query))
@testing.requires.views
def _create_views(con, schema=None):
for table_name in ('users', 'email_addresses'):
fullname = table_name
if schema:
fullname = "%s.%s" % (schema, table_name)
view_name = fullname + '_v'
query = "CREATE VIEW %s AS SELECT * FROM %s" % (view_name,
fullname)
con.execute(sa.sql.text(query))
@testing.requires.views
def _drop_views(con, schema=None):
for table_name in ('email_addresses', 'users'):
fullname = table_name
if schema:
fullname = "%s.%s" % (schema, table_name)
view_name = fullname + '_v'
query = "DROP VIEW %s" % view_name
con.execute(sa.sql.text(query))
class ReverseCasingReflectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
__backend__ = True
@testing.requires.denormalized_names
def setup(self):
testing.db.execute("""
CREATE TABLE weird_casing(
col1 char(20),
"Col2" char(20),
"col3" char(20)
)
""")
@testing.requires.denormalized_names
def teardown(self):
testing.db.execute("drop table weird_casing")
@testing.requires.denormalized_names
def test_direct_quoting(self):
m = MetaData(testing.db)
t = Table('weird_casing', m, autoload=True)
self.assert_compile(t.select(),
'SELECT weird_casing.col1, '
'weird_casing."Col2", weird_casing."col3" '
'FROM weird_casing')
class CaseSensitiveTest(fixtures.TablesTest):
"""Nail down case sensitive behaviors, mostly on MySQL."""
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('SomeTable', metadata,
Column('x', Integer, primary_key=True),
test_needs_fk=True
)
Table('SomeOtherTable', metadata,
Column('x', Integer, primary_key=True),
Column('y', Integer, sa.ForeignKey("SomeTable.x")),
test_needs_fk=True
)
@testing.fails_if(testing.requires._has_mysql_on_windows)
def test_table_names(self):
x = testing.db.run_callable(
testing.db.dialect.get_table_names
)
assert set(["SomeTable", "SomeOtherTable"]).issubset(x)
def test_reflect_exact_name(self):
m = MetaData()
t1 = Table("SomeTable", m, autoload=True, autoload_with=testing.db)
eq_(t1.name, "SomeTable")
assert t1.c.x is not None
@testing.fails_if(lambda:
testing.against(('mysql', '<', (5, 5))) and
not testing.requires._has_mysql_fully_case_sensitive()
)
def test_reflect_via_fk(self):
m = MetaData()
t2 = Table("SomeOtherTable", m, autoload=True, autoload_with=testing.db)
eq_(t2.name, "SomeOtherTable")
assert "SomeTable" in m.tables
@testing.fails_if(testing.requires._has_mysql_fully_case_sensitive)
@testing.fails_on_everything_except('sqlite', 'mysql', 'mssql')
def test_reflect_case_insensitive(self):
m = MetaData()
t2 = Table("sOmEtAbLe", m, autoload=True, autoload_with=testing.db)
eq_(t2.name, "sOmEtAbLe")
class ColumnEventsTest(fixtures.RemovesEvents, fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
cls.metadata = MetaData()
cls.to_reflect = Table(
'to_reflect',
cls.metadata,
Column('x', sa.Integer, primary_key=True),
Column('y', sa.Integer),
test_needs_fk=True
)
cls.related = Table(
'related',
cls.metadata,
Column('q', sa.Integer, sa.ForeignKey('to_reflect.x')),
test_needs_fk=True
)
sa.Index("some_index", cls.to_reflect.c.y)
cls.metadata.create_all(testing.db)
@classmethod
def teardown_class(cls):
cls.metadata.drop_all(testing.db)
def _do_test(self, col, update, assert_, tablename="to_reflect"):
# load the actual Table class, not the test
# wrapper
from sqlalchemy.schema import Table
m = MetaData(testing.db)
def column_reflect(insp, table, column_info):
if column_info['name'] == col:
column_info.update(update)
t = Table(tablename, m, autoload=True, listeners=[
('column_reflect', column_reflect),
])
assert_(t)
m = MetaData(testing.db)
self.event_listen(Table, 'column_reflect', column_reflect)
t2 = Table(tablename, m, autoload=True)
assert_(t2)
def test_override_key(self):
def assertions(table):
eq_(table.c.YXZ.name, "x")
eq_(set(table.primary_key), set([table.c.YXZ]))
self._do_test(
"x", {"key": "YXZ"},
assertions
)
def test_override_index(self):
def assertions(table):
idx = list(table.indexes)[0]
eq_(idx.columns, [table.c.YXZ])
self._do_test(
"y", {"key": "YXZ"},
assertions
)
def test_override_key_fk(self):
m = MetaData(testing.db)
def column_reflect(insp, table, column_info):
if column_info['name'] == 'q':
column_info['key'] = 'qyz'
elif column_info['name'] == 'x':
column_info['key'] = 'xyz'
to_reflect = Table("to_reflect", m, autoload=True, listeners=[
('column_reflect', column_reflect),
])
related = Table("related", m, autoload=True, listeners=[
('column_reflect', column_reflect),
])
assert related.c.qyz.references(to_reflect.c.xyz)
def test_override_type(self):
def assert_(table):
assert isinstance(table.c.x.type, sa.String)
self._do_test(
"x", {"type": sa.String},
assert_
)
def test_override_info(self):
self._do_test(
"x", {"info": {"a": "b"}},
lambda table: eq_(table.c.x.info, {"a": "b"})
)
| 36.071429
| 85
| 0.564529
|
a9387754a517098fc9d619835b0e551efa7a40dc
| 4,330
|
py
|
Python
|
src/python/WMCore/WorkQueue/WorkQueueUtils.py
|
tslazarova/WMCore
|
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
|
[
"Apache-2.0"
] | 1
|
2015-02-05T13:43:46.000Z
|
2015-02-05T13:43:46.000Z
|
src/python/WMCore/WorkQueue/WorkQueueUtils.py
|
tslazarova/WMCore
|
a09e2aefe700fb9b0d12b9f7089b21bde5a5bd62
|
[
"Apache-2.0"
] | 1
|
2016-10-13T14:57:35.000Z
|
2016-10-13T14:57:35.000Z
|
src/python/WMCore/WorkQueue/WorkQueueUtils.py
|
juztas/WMCore
|
f7e830a573d50fb1d7240797f18d809f994b934d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Various helper functions for workqueue"""
import logging
import os
from WMCore.Services.CRIC.CRIC import CRIC
__dbses = {}
def get_dbs(url):
"""Return DBS object for url"""
try:
return __dbses[url]
except KeyError:
from WMCore.Services.DBS.DBSReader import DBSReader
__dbses[url] = DBSReader(url)
return __dbses[url]
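# Note: get_dbs() memoises one DBSReader per url in the module-level __dbses
# dict, so repeated lookups for the same url reuse a single reader instead of
# constructing a new one on every call.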
__cric = None
__cmsSiteNames = []
def cmsSiteNames():
"""Get all cms sites"""
global __cmsSiteNames
if __cmsSiteNames:
return __cmsSiteNames
global __cric
if not __cric:
__cric = CRIC()
try:
__cmsSiteNames = __cric.getAllPSNs()
except Exception:
pass
return __cmsSiteNames
def makeLocationsList(siteWhitelist, siteBlacklist):
"""
_makeLocationsList_
Make a location list based on the intersection between a site white list
and blacklist, if none specified then all sites are listed.
"""
sites = cmsSiteNames()
if siteWhitelist:
# Just get the CMS sites matching the whitelists
sites = list(set(sites) & set(siteWhitelist))
if siteBlacklist:
# Get all CMS sites less the blacklist
sites = list(set(sites) - set(siteBlacklist))
return sites
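# A minimal sketch of makeLocationsList() with hypothetical site names (the
# real list comes from CRIC via cmsSiteNames()):
#   all sites       : ['T1_US_FNAL', 'T2_CH_CERN', 'T2_DE_DESY']
#   whitelist filter: makeLocationsList(['T2_CH_CERN', 'T2_DE_DESY'], [])
#                     keeps the intersection -> ['T2_CH_CERN', 'T2_DE_DESY']
#   blacklist filter: makeLocationsList([], ['T2_DE_DESY'])
#                     drops the blacklisted sites -> ['T1_US_FNAL', 'T2_CH_CERN']
# (set operations, so the returned order is not guaranteed)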
def queueFromConfig(config):
"""Create a queue from the config object"""
config = queueConfigFromConfigObject(config)
if config.WorkQueueManager.level == 'GlobalQueue':
from WMCore.WorkQueue.WorkQueue import globalQueue
return globalQueue(**config.WorkQueueManager.queueParams)
elif config.WorkQueueManager.level == 'LocalQueue':
from WMCore.WorkQueue.WorkQueue import localQueue
return localQueue(**config.WorkQueueManager.queueParams)
else:
from WMCore.WorkQueue.WorkQueue import WorkQueue
return WorkQueue(**config.WorkQueueManager.queueParams)
def queueConfigFromConfigObject(config):
"""From a config object create a config dict suitable for a queue object"""
from os import path
wqManager = config.section_('WorkQueueManager')
if not hasattr(wqManager, 'componentDir'):
wqManager.componentDir = path.join(config.General.WorkDir,
'WorkQueueManager')
if not hasattr(wqManager, 'namespace'):
wqManager.namespace = 'WMComponent.WorkQueueManager.WorkQueueManager'
if not hasattr(wqManager, 'logLevel'):
wqManager.logLevel = 'INFO'
if not hasattr(wqManager, 'pollInterval'):
wqManager.pollInterval = 600
# WorkQueue config
if not hasattr(wqManager, 'queueParams'):
wqManager.queueParams = {}
qConfig = wqManager.queueParams
qConfig['rucioAccount'] = getattr(config.General, "rucioAccount", "")
if hasattr(wqManager, 'couchurl'):
qConfig['CouchUrl'] = wqManager.couchurl
if hasattr(wqManager, 'dbname'):
qConfig['DbName'] = wqManager.dbname
if hasattr(wqManager, 'inboxDatabase'):
qConfig['InboxDbName'] = wqManager.inboxDatabase
# pull some info we need from other areas of the config
if "BossAirConfig" not in qConfig and hasattr(config, 'BossAir'):
qConfig["BossAirConfig"] = config
qConfig['BossAirConfig'].section_("Agent").agentName = config.Agent.agentName
if "JobDumpConfig" not in qConfig and hasattr(config, 'JobStateMachine'):
qConfig["JobDumpConfig"] = config
if "CacheDir" not in qConfig and getattr(config.WorkQueueManager, 'componentDir', None):
qConfig['CacheDir'] = os.path.join(config.WorkQueueManager.componentDir, 'cache')
if 'Team' not in qConfig and hasattr(config.Agent, 'teamName'):
qConfig['Team'] = config.Agent.teamName
if 'logger' not in qConfig:
import threading
myThread = threading.currentThread()
if not hasattr(myThread, 'logger'):
loggingLevelName = getattr(wqManager, 'logLevel', 'INFO')
logging.basicConfig(format='%(asctime)-15s %(levelname)-8s %(module)s: %(message)s',
level=getattr(logging, loggingLevelName))
myThread.logger = logging.getLogger('workqueue')
qConfig['logger'] = myThread.logger
# ReqMgr params
if not hasattr(wqManager, 'reqMgrConfig'):
wqManager.reqMgrConfig = {}
return config
| 34.365079
| 96
| 0.674596
|
326c2c919496182695017ed03b6fadbadd6c698f
| 1,676
|
py
|
Python
|
config/wsgi.py
|
tomcentrate/TestDjango
|
8b1a47d01ccd527ec37e905fce798523471bc603
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
tomcentrate/TestDjango
|
8b1a47d01ccd527ec37e905fce798523471bc603
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
tomcentrate/TestDjango
|
8b1a47d01ccd527ec37e905fce798523471bc603
|
[
"MIT"
] | null | null | null |
"""
WSGI config for Project Name project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# test_django directory.
app_path = os.path.dirname(os.path.abspath(__file__)).replace('/config', '')
sys.path.append(os.path.join(app_path, 'test_django'))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 39.904762
| 79
| 0.797136
|
eb59e6dd999de46bd248b61e0c0772af33ccf30b
| 1,075
|
py
|
Python
|
zulip/integrations/jabber/jabber_mirror.py
|
iishiishii/python-zulip-api
|
8500a3238739a080e1809e204c54685437631457
|
[
"Apache-2.0"
] | null | null | null |
zulip/integrations/jabber/jabber_mirror.py
|
iishiishii/python-zulip-api
|
8500a3238739a080e1809e204c54685437631457
|
[
"Apache-2.0"
] | null | null | null |
zulip/integrations/jabber/jabber_mirror.py
|
iishiishii/python-zulip-api
|
8500a3238739a080e1809e204c54685437631457
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import subprocess
import os
import traceback
import signal
from types import FrameType
from typing import Any
from zulip import RandomExponentialBackoff
def die(signal, frame):
# type: (int, FrameType) -> None
"""We actually want to exit, so run os._exit (so as not to be caught and restarted)"""
os._exit(1)
signal.signal(signal.SIGINT, die)
args = [os.path.join(os.path.dirname(sys.argv[0]), "jabber_mirror_backend.py")]
args.extend(sys.argv[1:])
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
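# Keep relaunching the mirroring backend, backing off between failures; a run
# that stays up longer than timeout_success_equivalent (300s here) is presumably
# treated as a success so the backoff resets rather than growing indefinitely.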
while backoff.keep_going():
print("Starting Jabber mirroring bot")
try:
ret = subprocess.call(args)
except Exception:
traceback.print_exc()
else:
if ret == 2:
# Don't try again on initial configuration errors
sys.exit(ret)
backoff.fail()
print("")
print("")
print("ERROR: The Jabber mirroring bot is unable to continue mirroring Jabber.")
print("Please contact zulip-devel@googlegroups.com if you need assistance.")
print("")
sys.exit(1)
| 25.595238
| 90
| 0.704186
|
694bbb832b2e17d5774fbe0b43c3e317a49da3d2
| 250
|
py
|
Python
|
server/lib/python3.9/site-packages/stripe/api_resources/reporting/report_type.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 1,078
|
2015-01-06T03:35:05.000Z
|
2022-03-25T13:25:48.000Z
|
server/lib/python3.9/site-packages/stripe/api_resources/reporting/report_type.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 558
|
2015-01-07T19:05:02.000Z
|
2022-03-28T22:19:24.000Z
|
server/lib/python3.9/site-packages/stripe/api_resources/reporting/report_type.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 382
|
2015-01-04T14:06:09.000Z
|
2022-03-16T04:52:04.000Z
|
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe.api_resources.abstract import ListableAPIResource
class ReportType(ListableAPIResource):
OBJECT_NAME = "reporting.report_type"
| 27.777778
| 64
| 0.836
|
3b4cd9582aa6fa7c62058fb11a92792ea8d88805
| 3,251
|
py
|
Python
|
PyFunceble/dataset/whois/base.py
|
spirillen/PyFunceble
|
f5188532dadb20a01d453e775825b0e0cfb64fb1
|
[
"Apache-2.0"
] | 2
|
2021-09-24T21:46:56.000Z
|
2021-12-19T13:50:14.000Z
|
PyFunceble/dataset/whois/base.py
|
spirillen/PyFunceble
|
f5188532dadb20a01d453e775825b0e0cfb64fb1
|
[
"Apache-2.0"
] | 33
|
2020-09-20T12:16:23.000Z
|
2021-06-13T17:45:58.000Z
|
PyFunceble/dataset/whois/base.py
|
spirillen/PyFunceble
|
f5188532dadb20a01d453e775825b0e0cfb64fb1
|
[
"Apache-2.0"
] | null | null | null |
"""
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the base of all WHOIS related dataset.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from typing import List, Union
from PyFunceble.database.sqlalchemy.all_schemas import WhoisRecord
from PyFunceble.dataset.db_base import DBDatasetBase
class WhoisDatasetBase(DBDatasetBase):
"""
Provides the base of all Whois related interface.
"""
FIELDS: List[str] = [
"subject",
"idna_subject",
"expiration_date",
"epoch",
"registrar",
]
COMPARISON_FIELDS: List[str] = ["subject", "idna_subject"]
@staticmethod
def is_expired(row: Union[dict, WhoisRecord]) -> bool:
"""
Given a row, we look if the row is expired.
"""
if isinstance(row, WhoisRecord):
to_check = row.epoch
elif "epoch" in row:
to_check = row["epoch"]
else:
return True
return datetime.utcnow() > datetime.fromtimestamp(float(to_check))
@DBDatasetBase.execute_if_authorized(None)
def get_filtered_row(self, row: Union[dict, WhoisRecord]) -> dict:
"""
        Removes all unknown fields (not declared) from the given row.
:param row:
The row to work with.
"""
        if isinstance(row, WhoisRecord):
row = row.to_dict()
result = super().get_filtered_row(row)
if "epoch" in result and isinstance(result["epoch"], float):
# We do this here because we have to convert to a string in
# order to be able to write into the CSV file.
result["epoch"] = str(result["epoch"])
return result
| 29.554545
| 88
| 0.552753
|
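A minimal sketch of the is_expired() helper above, assuming PyFunceble is installed; the epoch values are synthetic.
from datetime import datetime, timedelta
from PyFunceble.dataset.whois.base import WhoisDatasetBase
past = (datetime.utcnow() - timedelta(days=1)).timestamp()
future = (datetime.utcnow() + timedelta(days=30)).timestamp()
print(WhoisDatasetBase.is_expired({"epoch": past}))    # True: expiration date is behind us
print(WhoisDatasetBase.is_expired({"epoch": future}))  # False: record still valid
print(WhoisDatasetBase.is_expired({}))                 # True: a missing epoch is treated as expired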
3fdadde68472ce72561f6348e7ad8507d5b3df8b
| 2,027
|
py
|
Python
|
AutoEncoder/basicAE.py
|
wangyarui/deep-learning
|
0e6db09d5cd9c12bfb07dee09dc086a5d7eb759a
|
[
"Unlicense"
] | 1
|
2017-09-23T02:48:21.000Z
|
2017-09-23T02:48:21.000Z
|
AutoEncoder/basicAE.py
|
wangyarui/deep-learning
|
0e6db09d5cd9c12bfb07dee09dc086a5d7eb759a
|
[
"Unlicense"
] | 1
|
2019-04-08T00:33:02.000Z
|
2019-07-24T08:44:18.000Z
|
AutoEncoder/basicAE.py
|
wangyarui/deep-learning
|
0e6db09d5cd9c12bfb07dee09dc086a5d7eb759a
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
print('TensorFlow version: %s' % tf.__version__)
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0, one_hot=False)
img = mnist.train.images[20]
plt.imshow(img.reshape((28,28)))
#plt.axis('off')
#plt.show()
hidden_units = 64
input_units = mnist.train.images.shape[1]
inputs_ = tf.placeholder(tf.float32, (None, input_units), name='inputs_')
targets_ = tf.placeholder(tf.float32, (None, input_units), name='targets_')
hidden_ = tf.layers.dense(inputs_, hidden_units, activation=tf.nn.relu)
logits_ = tf.layers.dense(hidden_, input_units, activation=None)
outputs_ = tf.sigmoid(logits_, name='outputs_')
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_)
cost = tf.reduce_mean(loss)
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)  # minimize the scalar mean, not the per-pixel loss tensor
sess = tf.Session()
epochs = 20
batch_size = 128
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for idx in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)  # fetch the next batch
batch_cost, _ = sess.run([cost, optimizer],
feed_dict={inputs_: batch[0],
targets_: batch[0]})
print("Epoch: {}/{}".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True, figsize=(20,8))
test_imgs = mnist.test.images[:5]
reconstructed, compressed = sess.run([outputs_, hidden_],
feed_dict={inputs_: test_imgs})
for image, row in zip([test_imgs, reconstructed], axes):
for img, ax in zip(image, row):
ax.imshow(img.reshape((28, 28)))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
plt.show()
| 28.152778
| 84
| 0.676369
|
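The objective above is TensorFlow's sigmoid cross-entropy on logits. A small NumPy sketch of the numerically stable form it computes, mirroring the documented formula rather than the training script itself:
import numpy as np
def sigmoid_cross_entropy_with_logits(logits, targets):
    # Stable form: max(x, 0) - x*z + log(1 + exp(-|x|)) for logits x and targets z.
    return np.maximum(logits, 0) - logits * targets + np.log1p(np.exp(-np.abs(logits)))
x = np.array([-2.0, 0.0, 3.0])  # logits
z = np.array([0.0, 1.0, 1.0])   # pixel targets in [0, 1]
# Naive form for comparison: -z*log(sigmoid(x)) - (1-z)*log(1 - sigmoid(x))
sig = 1.0 / (1.0 + np.exp(-x))
naive = -z * np.log(sig) - (1 - z) * np.log(1 - sig)
print(np.allclose(sigmoid_cross_entropy_with_logits(x, z), naive))  # True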
bb395edf7bd9f0a5f83c1a01f74bb440db3d395b
| 4,499
|
py
|
Python
|
UnifiedPipeline/automl_inference/scripts/forecast.py
|
nickwiecien/solution-accelerator-many-models
|
ff286029b474ebff09ff010418be56e2eb55de57
|
[
"MIT"
] | null | null | null |
UnifiedPipeline/automl_inference/scripts/forecast.py
|
nickwiecien/solution-accelerator-many-models
|
ff286029b474ebff09ff010418be56e2eb55de57
|
[
"MIT"
] | null | null | null |
UnifiedPipeline/automl_inference/scripts/forecast.py
|
nickwiecien/solution-accelerator-many-models
|
ff286029b474ebff09ff010418be56e2eb55de57
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pandas as pd
import os
import argparse
from sklearn.externals import joblib
import datetime
import hashlib
from azureml.core import Run
from azureml.core.model import Model
from azureml_user.parallel_run import EntryScript
import numpy as np
# 0.0 Parse input arguments
parser = argparse.ArgumentParser("split")
parser.add_argument("--group_column_names", '--nargs',
nargs='*', type=str, help="group_column_names")
parser.add_argument("--target_column_name", type=str,
help="target column", default=None)
parser.add_argument("--time_column_name", type=str,
help="time column", default=None)
parser.add_argument("--many_models_run_id",
type=str,
default=None,
required=False,
help="many_models_run_id: many models training run id.")
args, _ = parser.parse_known_args()
print("Argument 1 group_column_names: {}".format(args.group_column_names))
print("Argument 2 target_column_name: {}".format(args.target_column_name))
print("Argument 3 time_column_name: {}".format(args.time_column_name))
if hasattr(args, "many_models_run_id") and args.many_models_run_id:
print("Argument 4 many_models_run_id: {}".format(args.many_models_run_id))
current_step_run = Run.get_context()
def run(input_data):
# 1.0 Set up Logging
entry_script = EntryScript()
logger = entry_script.logger
logger.info('Making forecasts')
os.makedirs('./outputs', exist_ok=True)
all_predictions = pd.DataFrame()
# 2.0 Iterate through input data
for idx, file_path in enumerate(input_data):
date1 = datetime.datetime.now()
file_name, file_extension = os.path.splitext(
os.path.basename(file_path))
logger.info(file_path)
if file_extension.lower() == ".parquet":
data = pd.read_parquet(file_path)
else:
data = pd.read_csv(file_path)
tags_dict = {}
if hasattr(args, "many_models_run_id") and args.many_models_run_id:
tags_dict['RunId'] = args.many_models_run_id
for column_name in args.group_column_names:
tags_dict.update(
{column_name: str(data.iat[0, data.columns.get_loc(column_name)])})
print(tags_dict)
model_string = '_'.join(str(v) for k, v in sorted(
tags_dict.items()) if k in args.group_column_names)
logger.info("model string to encode " + model_string)
sha = hashlib.sha256()
sha.update(model_string.encode())
model_name = 'automl_' + sha.hexdigest()
logger.info('starting (' + file_path + ') ' + str(date1))
ws = current_step_run.experiment.workspace
logger.info('query the model ' + model_name)
model_list = Model.list(ws, name=model_name,
tags=tags_dict, latest=True)
if not model_list:
print("Could not find model")
continue
# 4.0 Un-pickle model and make predictions
model_path = model_list[0].download(exist_ok=True)
model = joblib.load(model_path)
model_name = model_list[0].name
print('Unpickled the model ' + model_name)
# Grab relevant model metrics
run_id = model_list[0].run_id
run = Run.get(ws, run_id)
target_metric = run.get_metrics(name='mean_absolute_error')['mean_absolute_error']
X_test = data.copy()
if args.target_column_name is not None:
X_test.pop(args.target_column_name)
print("prediction data head")
print(X_test.head())
y_predictions, X_trans = model.forecast(
X_test, ignore_data_errors=True)
print('Made predictions ' + model_name)
# Insert predictions/model metrics to test set
predicted_column_name = 'Predictions'
data[predicted_column_name] = y_predictions
data['model_metric'] = np.full(len(y_predictions), target_metric)
print(data.head())
print('Inserted predictions ' + model_name)
cols = list(data.columns.values)
print(cols)
all_predictions = all_predictions.append(data)
# 5.0 Log the run
date2 = datetime.datetime.now()
logger.info('ending (' + str(file_path) + ') ' + str(date2))
print(all_predictions.head())
return all_predictions
| 34.875969
| 90
| 0.649922
|
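The script above looks models up by a deterministic name derived from the group columns. A standalone sketch of that naming scheme; the column names and values are made up:
import hashlib
tags_dict = {"Store": "1001", "Brand": "lemonade"}   # hypothetical group-column tags
group_column_names = ["Store", "Brand"]
# Sort the tags, join their values, and hash the result, as the scoring script does.
model_string = "_".join(
    str(v) for k, v in sorted(tags_dict.items()) if k in group_column_names
)
sha = hashlib.sha256()
sha.update(model_string.encode())
model_name = "automl_" + sha.hexdigest()
print(model_name)  # stable model name for this (Brand, Store) combination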
d1f889eccfac3e3e6dad0a4724181971d88bfcd7
| 917
|
py
|
Python
|
api/weather_data_flaskapi/date_range_arguments.py
|
Fyzel/weather-data-flaskapi
|
6b06c1f79091bbb5c9ee3327d2ff778c90bb28a8
|
[
"Apache-2.0"
] | 1
|
2017-09-24T03:30:55.000Z
|
2017-09-24T03:30:55.000Z
|
api/weather_data_flaskapi/date_range_arguments.py
|
Fyzel/weather-data-flaskapi
|
6b06c1f79091bbb5c9ee3327d2ff778c90bb28a8
|
[
"Apache-2.0"
] | 17
|
2017-09-27T23:54:02.000Z
|
2022-03-31T11:10:18.000Z
|
api/weather_data_flaskapi/date_range_arguments.py
|
Fyzel/weather-data-flaskapi
|
6b06c1f79091bbb5c9ee3327d2ff778c90bb28a8
|
[
"Apache-2.0"
] | 1
|
2020-06-15T19:29:56.000Z
|
2020-06-15T19:29:56.000Z
|
"""
@author: Fyzel@users.noreply.github.com
@copyright: 2017 Englesh.org. All rights reserved.
@license: https://github.com/Fyzel/weather-data-flaskapi/blob/master/LICENSE
@contact: Fyzel@users.noreply.github.com
@deffield updated: 2017-06-14
"""
from flask_restplus import reqparse
from datetime import date, timedelta
date_range_arguments = reqparse.RequestParser()
date_range_arguments.add_argument('start-date',
type=str,
required=False,
default=str(date.today() - timedelta(days=7)),
help='Start date')
date_range_arguments.add_argument('end-date',
type=str,
required=False,
default=str(date.today()),
help='End date')
| 32.75
| 80
| 0.534351
|
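A minimal sketch of attaching the parser above to a resource; the app and endpoint names are hypothetical, and flask_restplus must be installed alongside the package path shown:
from flask import Flask
from flask_restplus import Api, Resource
from api.weather_data_flaskapi.date_range_arguments import date_range_arguments
app = Flask(__name__)
api = Api(app)
@api.route("/observations")  # hypothetical endpoint
class Observations(Resource):
    @api.expect(date_range_arguments)
    def get(self):
        args = date_range_arguments.parse_args()
        # Echo the parsed window; the defaults cover the last seven days.
        return {"start-date": args["start-date"], "end-date": args["end-date"]}
if __name__ == "__main__":
    app.run()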
c1d1294dd1c0a965ac10297f7e74018a0fa0643a
| 149
|
py
|
Python
|
home/views.py
|
xuxiaowei-com-cn/Django-demo
|
110d15cd615854bb6732d26ddd85f45afe7d7d0a
|
[
"MIT"
] | null | null | null |
home/views.py
|
xuxiaowei-com-cn/Django-demo
|
110d15cd615854bb6732d26ddd85f45afe7d7d0a
|
[
"MIT"
] | null | null | null |
home/views.py
|
xuxiaowei-com-cn/Django-demo
|
110d15cd615854bb6732d26ddd85f45afe7d7d0a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def index(request):
    """The home page for the learning notes app."""
    return render(request, 'home/index.html')
| 16.555556
| 45
| 0.697987
|
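A minimal sketch of the URL configuration such a view is normally wired into; the app-level home/urls.py below is an assumption, not part of the repository:
# Hypothetical home/urls.py wiring the index view.
from django.urls import path
from . import views
app_name = 'home'
urlpatterns = [
    path('', views.index, name='index'),
]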
dfe0046f340fca5b36bdf27ec191fc29895494db
| 1,512
|
py
|
Python
|
everyday_wechat/control/onewords/juzimi.py
|
jianxchen/EverydayWechat
|
93c1e25fca066587afe2d1196ca8382761c9dbfb
|
[
"MIT"
] | 1
|
2021-05-18T07:06:52.000Z
|
2021-05-18T07:06:52.000Z
|
everyday_wechat/control/onewords/juzimi.py
|
jianxchen/EverydayWechat
|
93c1e25fca066587afe2d1196ca8382761c9dbfb
|
[
"MIT"
] | null | null | null |
everyday_wechat/control/onewords/juzimi.py
|
jianxchen/EverydayWechat
|
93c1e25fca066587afe2d1196ca8382761c9dbfb
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Juzimi: (https://www.juzimi.com/)
Republican-era love letters: Zhu Shenghao's love quotes && "Love You Like Life"
Author: ClaireYiu(https://github.com/ClaireYiu)
"""
import random
import requests
from everyday_wechat.utils.common import (
Proxies
)
# from requests_html import HTMLSession
def get_zsh_info():
"""
    Juzimi: (https://www.juzimi.com/)
    Zhu Shenghao: https://www.juzimi.com/writer/朱生豪
    Love You Like Life (Wang Xiaobo): https://www.juzimi.com/article/爱你就像爱生命
    Three-line love letters: https://www.juzimi.com/article/25637
    :return: str a love quote, or None on failure
"""
    print('Fetching a Republican-era love quote...')
try:
name = [
['writer/朱生豪', 38, ],
['article/爱你就像爱生命', 22],
['article/25637', 55],
]
apdix = random.choice(name)
        # page numbering starts from zero.
url = 'https://www.juzimi.com/{}?page={}'.format(
apdix[0], random.randint(1, apdix[1]))
# print(url)
        resp = requests.get(url, proxies=Proxies)
if resp.status_code == 200:
# print(resp.html)
# results = resp.find('a.xlistju')
# if results:
# re_text = random.choice(results).text
# if re_text and '\n\n' in re_text:
# re_text = re_text.replace('\n\n','\n')
# return re_text
return None
        print('Failed to fetch the love quote.')
except Exception as exception:
print(exception)
return None
get_one_words = get_zsh_info
if __name__ == '__main__':
# for _ in range(15):
# ow = get_one_words()
# print(ow)
pass
| 25.2
| 60
| 0.554233
|
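A standalone sketch of the page-selection logic above, with no network access; the (path, max_page) pairs are taken from the list in the script:
import random
sources = [
    ['writer/朱生豪', 38],             # Zhu Shenghao quotes
    ['article/爱你就像爱生命', 22],    # "Love You Like Life"
    ['article/25637', 55],             # three-line love letters
]
# Pick a source, then a random page within its known range, as get_zsh_info() does.
path, max_page = random.choice(sources)
url = 'https://www.juzimi.com/{}?page={}'.format(path, random.randint(1, max_page))
print(url)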
54a7786ab0f5fafc80c05ae34573918c76f15bf6
| 1,290
|
py
|
Python
|
fpga/boards/artya7-100t/c-class/configure/main.py
|
Rajssss/shakti-soc
|
7dbf88dd7e568c9f1fcd67ee8fbf579f2fe21f9d
|
[
"BSD-3-Clause"
] | null | null | null |
fpga/boards/artya7-100t/c-class/configure/main.py
|
Rajssss/shakti-soc
|
7dbf88dd7e568c9f1fcd67ee8fbf579f2fe21f9d
|
[
"BSD-3-Clause"
] | null | null | null |
fpga/boards/artya7-100t/c-class/configure/main.py
|
Rajssss/shakti-soc
|
7dbf88dd7e568c9f1fcd67ee8fbf579f2fe21f9d
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import os
import shutil
import sys
import configure.configure as configure
import configure.utils as utils
def main():
'''
Entry point for riscv_config.
'''
# Set up the parser
parser = utils.config_cmdline_args()
args = parser.parse_args()
# Set up the logger
utils.setup_logging(args.verbose)
logger = logging.getLogger()
logger.handlers = []
ch = logging.StreamHandler()
ch.setFormatter(utils.ColoredFormatter())
logger.addHandler(ch)
logger.info('************ C-Class Core Generator ************ ')
logger.info('----------- Copyright (c) IIT Madras ----------- ')
logger.info('---------- Available under BSD License---------- ')
logger.info('\n\n')
if args.clean is None:
update_dep = True
patch = True
else:
update_dep = False
patch = False
    logger.info('Checking pre-requisites')
configure.check_prerequisites()
configure.handle_dependencies(args.verbose, args.clean,update_dep,patch)
if args.ispec is None:
logger.info('No Input YAML provided')
sys.exit(0)
elif args.clean is None:
configure.validate_specs(os.path.abspath(args.ispec), True)
if __name__ == "__main__":
exit(main())
| 26.326531
| 76
| 0.624806
|
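The entry point above installs a project-specific utils.ColoredFormatter on a StreamHandler; the class below is only a generic stand-in built on the standard logging module, not the project's implementation:
import logging
class ColoredFormatter(logging.Formatter):
    # Generic ANSI colouring by level; a stand-in, not configure.utils.ColoredFormatter.
    COLORS = {
        logging.DEBUG: '\033[36m',    # cyan
        logging.INFO: '\033[32m',     # green
        logging.WARNING: '\033[33m',  # yellow
        logging.ERROR: '\033[31m',    # red
    }
    RESET = '\033[0m'
    def format(self, record):
        color = self.COLORS.get(record.levelno, '')
        return color + super().format(record) + self.RESET
logger = logging.getLogger('demo')
handler = logging.StreamHandler()
handler.setFormatter(ColoredFormatter('%(levelname)s: %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('colored log output')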
3dfd3be150bd887bb4b7c89a86b4c32fa4304b67
| 645
|
py
|
Python
|
digital/db/migration.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
digital/db/migration.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
digital/db/migration.py
|
knowx/digital
|
47872a783856444cce6ff8ebda355f3f3da727ac
|
[
"Apache-2.0"
] | null | null | null |
from stevedore import driver
from digital import conf
CONF = conf.CONF
_IMPL = None
def get_backend():
global _IMPL
if not _IMPL:
_IMPL = driver.DriverManager("digital.database.migration_backend",
"sqlalchemy").driver
return _IMPL
def upgrade(version=None):
"""Migrate the database to `version` or the most recent version."""
return get_backend().upgrade(version)
def version():
return get_backend().version()
def stamp(version):
return get_backend().stamp(version)
def revision(message, autogenerate):
return get_backend().revision(message, autogenerate)
| 20.15625
| 74
| 0.67907
|
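The backend above is resolved through a stevedore entry point; a sketch of how such a driver could be registered at packaging time. The target module path digital.db.sqlalchemy.migration is an assumption for illustration:
# Sketch of a setup.py registering the migration backend entry point.
from setuptools import setup, find_packages
setup(
    name='digital',
    packages=find_packages(),
    entry_points={
        'digital.database.migration_backend': [
            # The name 'sqlalchemy' matches the DriverManager lookup above;
            # the target module path is an assumption.
            'sqlalchemy = digital.db.sqlalchemy.migration',
        ],
    },
)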
fe745f0d591a9a3ea36ce38acd032e16b64869c2
| 2,794
|
py
|
Python
|
masonite/managers/Manager.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
masonite/managers/Manager.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
masonite/managers/Manager.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
""" Manager Module """
import inspect
from masonite.exceptions import (DriverNotFound,
MissingContainerBindingNotFound,
UnacceptableDriverType)
class Manager:
"""Base Manager Class
"""
config = None
driver_prefix = None
def __init__(self, container=None):
"""Manager constructor
Keyword Arguments:
container {masonite.app.App} -- The container class (default: {None})
"""
self.manage_driver = None
self.container = container
def load_container(self, container):
"""Loads the container into the class and creates the default driver
Arguments:
container {masonite.app.App} -- The container class
Returns:
self
"""
self.container = container
self.create_driver()
return self
def driver(self, driver):
"""Creates the driver specified and returns the driver instance.
Arguments:
driver {masonite.drivers.Driver} -- An instance of a Driver class.
Returns:
masonite.drivers.Driver -- Returns a driver which is an instance of the base Driver class.
"""
self.create_driver(driver)
return self.container.resolve(self.manage_driver).load_manager(self)
def create_driver(self, driver=None):
"""Creates the driver to be used.
This could be used as the default driver when the manager is created or called internally on the fly
to change to a specific driver
Keyword Arguments:
driver {string} -- The name of the driver to switch to (default: {None})
Raises:
UnacceptableDriverType -- Raised when a driver passed in is not a string or a class
DriverNotFound -- Raised when the driver can not be found.
"""
if not driver:
driver = self.container.make(self.config).DRIVER.capitalize()
else:
if isinstance(driver, str):
driver = driver.capitalize()
try:
if isinstance(driver, str):
self.manage_driver = self.container.make(
'{0}{1}Driver'.format(self.driver_prefix, driver)
)
return
elif inspect.isclass(driver):
self.manage_driver = driver
return
raise UnacceptableDriverType(
                'String or class based driver required. {} driver received.'.format(driver))
except MissingContainerBindingNotFound:
raise DriverNotFound(
'Could not find the {0}{1}Driver from the service container. Are you missing a service provider?'.format(self.driver_prefix, driver))
| 31.75
| 149
| 0.598067
|
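A minimal sketch of a concrete manager built on the base class above; the UploadManager name, config key, and driver prefix follow Masonite conventions but are shown only as an illustration:
from masonite.managers.Manager import Manager
class UploadManager(Manager):
    # Container binding that holds the DRIVER setting for this manager.
    config = 'storage'
    # Prefix used to resolve e.g. 'UploadDiskDriver' from the service container.
    driver_prefix = 'Upload'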
68c44d3b02dffc24cc4fb706eed5ce4226a78005
| 110
|
py
|
Python
|
pyspline/__init__.py
|
kanekosh/pyspline
|
13fdb0cd8231d2efdb5d5b5f4f2c0c693b51363d
|
[
"Apache-2.0"
] | null | null | null |
pyspline/__init__.py
|
kanekosh/pyspline
|
13fdb0cd8231d2efdb5d5b5f4f2c0c693b51363d
|
[
"Apache-2.0"
] | null | null | null |
pyspline/__init__.py
|
kanekosh/pyspline
|
13fdb0cd8231d2efdb5d5b5f4f2c0c693b51363d
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "1.4.0"
from .pyCurve import Curve
from .pySurface import Surface
from .pyVolume import Volume
| 18.333333
| 30
| 0.781818
|
2b4e5517afcff4fb395ed0c37f99eebad7c34df8
| 17,233
|
py
|
Python
|
chamberconnectlibrary/modbus.py
|
tim-andes/ChamberConnectLibrary
|
1d2deb8b2629e47a45a838e89419ffe1d066cecd
|
[
"MIT"
] | 21
|
2016-07-19T20:13:22.000Z
|
2021-12-15T11:18:35.000Z
|
chamberconnectlibrary/modbus.py
|
tim-andes/ChamberConnectLibrary
|
1d2deb8b2629e47a45a838e89419ffe1d066cecd
|
[
"MIT"
] | 15
|
2017-05-18T13:26:03.000Z
|
2021-12-21T17:41:33.000Z
|
chamberconnectlibrary/modbus.py
|
tim-andes/ChamberConnectLibrary
|
1d2deb8b2629e47a45a838e89419ffe1d066cecd
|
[
"MIT"
] | 13
|
2017-05-18T06:50:50.000Z
|
2022-01-28T14:09:23.000Z
|
'''
Copyright (C) Espec North America, INC. - All Rights Reserved
Written by Myles Metzler mmetzler@espec.com, Feb. 2016
Partial Modbus implementation for communicating with Watlow controllers (input/holding registers only)
'''
#pylint: disable=W0703
import socket
import struct
import time
import collections
import serial
class ModbusError(Exception):
'''Generic Modbus exception.'''
pass
class Modbus(object):
'''
    A subset of a Modbus master library; only implements Modbus functions:
3: Read Holding Register(s)
4: Read Input Register(s)
6: Write Holding Register
16: Write Multiple Holding Registers
'''
def __init__(self, address, *args, **kwargs):
self.low_word_first = kwargs.get('low_word_first', True)
self.retry = kwargs.get('retry', False)
self.address = address
self.error_messages = {
1: 'Illegal Function',
2: 'Illegal Data Address',
3: 'Illegal Data Value',
4: 'Slave Device Failure',
5: 'Acknowledge',
6: 'Slave Device Busy',
7: 'Negative Acknowledge',
8: 'Memory Parity Error',
            10:'Gateway Path Unavailable',
11:'Gateway Target Device Failed To Respond'
}
def read_input(self, register, count=1):
'''
Read input register(s)
Args:
register (int): The modbus register to read
            count (int): The number of modbus registers to read (default=1)
Returns:
list. unsigned 16bit integers
'''
packet = self._make_packet(4, register, count)
try:
rval = self.interact(packet)
except ModbusError:
if self.retry:
rval = self.interact(packet)
else:
raise
return self._decode_packet(rval, packet)
def read_input_signed(self, register, count=1):
'''
Read some signed short(s)
Args:
register (int): The modbus register to read
count (int): The number of modbus registers to read (default=1)
Returns:
list. signed 16bit integers
'''
vals = self.read_input(register, count)
return [struct.unpack('h', struct.pack('H', val))[0] for val in vals]
def read_input_float(self, register, count=1):
'''
Read some floating point values from 2 adjacent modbus registers
Args:
register (int): the first register to start reading at.
            count (int): the number of floats to read (2*count registers will actually be read)
Returns:
list. 32bit floats
'''
val = self.read_input(register, count*2)
fidx, sidx = (0, 1) if self.low_word_first else (1, 0)
return [
round(struct.unpack('f', struct.pack('HH', val[i+fidx], val[i+sidx]))[0], 1)
for i in range(0, count*2, 2)
]
def read_input_string(self, register, count):
'''
Read a string
Args:
register (int): The register to start reading from
count(int): The number of registers to read (length of string)
Returns:
str
'''
val = self.read_input(register, count)
rstring = ""
for char in val:
            if char != 0:
rstring = rstring + chr(char)
return rstring
def read_holding(self, register, count=1):
'''
Read holding register(s)
Args:
register (int): The modbus register to read
count (int): The number of modbus registers to read (default=1)
Returns:
list. unsigned 16bit integers
'''
packet = self._make_packet(3, register, count)
try:
rval = self.interact(packet)
except ModbusError:
if self.retry:
rval = self.interact(packet)
else:
raise
return self._decode_packet(rval, packet)
def read_holding_signed(self, register, count=1):
'''
Read some signed short(s)
Args:
register (int): The modbus register to read
count (int): The number of modbus registers to read (default=1)
Returns:
list. signed 16bit integers
'''
vals = self.read_holding(register, count)
return [struct.unpack('h', struct.pack('H', val))[0] for val in vals]
def read_holding_float(self, register, count=1):
'''
Read some floating point values from 2 adjacent modbus registers
Args:
register (int): the first register to start reading at.
            count (int): the number of floats to read (2*count registers will actually be read)
Returns:
list. 32bit floats
'''
val = self.read_holding(register, count*2)
fidx, sidx = (0, 1) if self.low_word_first else (1, 0)
return [
round(struct.unpack('f', struct.pack('HH', val[i+fidx], val[i+sidx]))[0], 1)
for i in range(0, count*2, 2)
]
def read_holding_string(self, register, count):
'''
Read a string
Args:
register (int): The register to start reading from
count(int): The number of registers to read (length of string)
Returns:
str
'''
val = self.read_holding(register, count)
rstring = ""
for char in val:
            if char != 0:
rstring = rstring + chr(char)
return rstring
def write_holding(self, register, value):
'''
Write to holding 16bit register(s), accepts single values or lists of values
Args:
register (int): register(s) to write to
value (int or list(int)): value(s) to write,
'''
packettype = 16 if isinstance(value, collections.Iterable) else 6
packet = self._make_packet(packettype, register, value)
try:
rval = self.interact(packet)
except ModbusError:
if self.retry:
rval = self.interact(packet)
else:
raise
self._decode_packet(rval, packet)
def write_holding_signed(self, register, value):
'''
Write to signed 16bit holding register(s), accepts single values or lists of values
Args:
register (int): register(s) to write to
value (int or list(int)): value(s) to write,
'''
if isinstance(value, collections.Iterable):
value = [0xFFFF & val for val in value]
else:
value = 0xFFFF & value #trim to 16bit signed int
self.write_holding(register, value)
def write_holding_float(self, register, value):
'''
Write floating point values to the controller
Args:
register (int): first register to write to, 2 float value will be written.
            value (float or list(float)): value(s) to write
'''
if isinstance(value, collections.Iterable):
packval = []
for val in value:
packval += self._pack32('f', val)
else:
packval = self._pack32('f', value)
self.write_holding(register, packval)
def write_holding_string(self, register, value, length=20, padder=0):
'''
Write a string to the controller
Args:
            register (int): first register to write to
value (str): The string to write
length (int): The string will be padded or truncated to this length.
Return:
None
'''
mods = [ord(c) for c in value]
mods.extend([padder]*length)
self.write_holding(register, mods[0:length])
def interact(self, packet):
'''Interact with the physical interface'''
raise NotImplementedError('ModbusTCP or ModbusRTU must be used not Modbus class')
def read_item(self, **kwargs):
'''
        Read a parameter from the controller.
kwargs:
register: int (relative register value, required)
address: int
type: string (holding/holding_signed/holding_float/holding_string/input/input_signed/input_float/input_string)
count: int (only applies to string only)
low_word_first: bool (word order for 32 bit values)
            scalar: int (factor that the read value will be divided by)
returns:
dict: ex: {'register':2782, 'address':1, 'type':'holding_float', 'count':1, 'low_word_first':True, 'scalar':1, 'value':50.0}
'''
return self.read_items([kwargs])[0]
def read_items(self, items):
'''
Read parameters from the controller using a list of arguments for each parameter
params:
list: ex: [{'register':2782, 'address':1, 'type':'holding_float', 'count':1, 'low_word_first':True, 'scalar':1}]
returns:
list: ex: [{'register':2782, 'address':1, 'type':'holding_float', 'count':1, 'low_word_first':True, 'scalar':1, 'value':50.0}]
'''
types = {
'holding': self.read_holding,
'holding_signed': self.read_holding_signed,
'holding_float': self.read_holding_float,
'holding_string': self.read_holding_string,
'input': self.read_input,
'input_signed': self.read_input_signed,
'input_float': self.read_input_float,
'input_string': self.read_input_string
}
for itm in items:
self.address = itm.get('address', self.address)
self.low_word_first = itm.get('low_word_first', self.low_word_first)
func = itm.get('type', 'holding')
vals = types[func](itm['register'], itm.get('count', 1))
if 'string' in func:
itm['value'] = vals
            elif isinstance(vals, (list, tuple)):
                if 'scalar' in itm and itm['scalar'] != 1:
                    vals = [float(val) / itm['scalar'] for val in vals]
                itm['value'] = vals if len(vals) > 1 else vals[0]
return items
def _pack32(self, format, value):
pval = struct.unpack('HH', struct.pack(format, value))
return list(pval) if self.low_word_first else [pval[1], pval[0]]
def _make_packet(self, function, register, args):
'''Make modbus request packet.'''
if function in [3, 4, 6]:
return struct.pack(">BBHH", self.address, function, register, args)
elif function == 16:
margs = [self.address, function, register, len(args), len(args)*2] + list(args)
return struct.pack(">BBHHB%dH" % len(args), *margs)
else:
raise NotImplementedError("Supplied modbus function code is not supported.")
def _decode_packet(self, packet, spacket):
'''Decode the modbus request packet.'''
fcode = struct.unpack(">B", packet[1])[0]
addr = struct.unpack(">B", packet[0])[0]
if self.address != addr:
            shex = ":".join("{:02x}".format(ord(c)) for c in spacket)
            rhex = ":".join("{:02x}".format(ord(c)) for c in packet)
            raise ModbusError("Address error; Sent=%s, Received=%s" % (shex, rhex))
if fcode > 127:
ecode = struct.unpack(">B", packet[2])[0]
ttp = (ecode, self.error_messages.get(ecode, 'Unknown error code'))
raise ModbusError('Modbus Error: Exception code = %d(%s)' % ttp)
if fcode in [3, 4]: #Read input/holding register(s)
cnt = struct.unpack(">B", packet[2])[0]/2
return struct.unpack(">%dH" % cnt, packet[3:])
elif fcode == 6:
pass #nothing is required
elif fcode == 16:
pass #nothing required
else:
raise NotImplementedError("Supplied modbus function code is not supported.")
class ModbusRTU(Modbus):
'''
    A subset of a Modbus RTU master library; only implements Modbus functions:
3: Read Holding Register(s)
4: Read Input Register(s)
6: Write Holding Register
16: Write Multiple Holding Registers
'''
def __init__(self, address, port, **kwargs):
super(ModbusRTU, self).__init__(address, port, **kwargs)
        # Watlow suggests using a 0.012s char send time for baud rates greater than 19200
databits, stopbits = kwargs.get('databits', 8), kwargs.get('stopbits', 1)
baud = kwargs.get('baud', 9600)
        # calculated pause time does not work on the Watlow F4T, using Watlow's recommended delay...
#self.pause = 3.5 * (((databits + stopbits + 2)/ baud) if baud < 19200 else 0.012)
self.pause = 0.03
self.serial = serial.Serial(
port=port,
baudrate=baud,
bytesize=databits,
parity=kwargs.get('parity', 'N'),
stopbits=stopbits,
timeout=kwargs.get('timeout', 3)
)
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
'''
Close the serial port.
'''
self.serial.close()
def _calc_crc(self, data):
'''
calculate the CRC16
'''
crc = 0xFFFF
for i in data:
crc = crc ^ ord(i)
for _ in xrange(8):
tmp = crc & 1
crc = crc >> 1
if tmp:
crc = crc ^ 0xA001
return ((crc % 256) << 8) + (crc >> 8) #swap byte order
def interact(self, packet):
crc = struct.pack(">H", self._calc_crc(packet))
self.serial.write(packet + crc)
time.sleep(self.pause)
head = self.serial.read(2)
if len(head) == 0:
raise ModbusError("The slave device did not respond.")
raddress = struct.unpack('>B', head[0])[0]
fcode = struct.unpack('>B', head[1])[0]
if fcode == 16 or fcode == 6:
body = self.serial.read(4)
        elif fcode == 3 or fcode == 4:
body = self.serial.read(1)
body += self.serial.read(struct.unpack('>B', body)[0])
elif fcode > 127:
body = self.serial.read(1)
else:
            raise NotImplementedError("Only modbus function codes 3,4,6,16 are implemented.")
        crc_raw = self.serial.read(2)  # keep the raw CRC bytes for error reporting
        rcrc = struct.unpack('>H', crc_raw)[0]
        ccrc = self._calc_crc(head+body)
        if self.address != raddress:
            shex = ":".join(["{:02x}".format(ord(c)) for c in packet+crc])
            rhex = ":".join(["{:02x}".format(ord(c)) for c in head+body+crc_raw])
            raise ModbusError("Address error; Sent=%s, Received=%s" % (shex, rhex))
        if rcrc != ccrc:
            shex = ":".join(["{:02x}".format(ord(c)) for c in packet+crc])
            rhex = ":".join(["{:02x}".format(ord(c)) for c in head+body+crc_raw])
            raise ModbusError("CRC error; Sent=%s, Received=%s" % (shex, rhex))
return head + body
class ModbusTCP(Modbus):
'''
    A subset of a Modbus TCP master library; only implements Modbus functions:
3: Read Holding Register(s)
4: Read Input Register(s)
6: Write Holding Register
16: Write Multiple Holding Registers
'''
def __init__(self, address, host, port=502, **kwargs):
super(ModbusTCP, self).__init__(address, host, **kwargs)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#self.socket.settimeout(timeout)
self.socket.setblocking(True)
self.socket.connect((host, port))
self.packet_id = 1
time.sleep(0.1)
def __del__(self):
self.close()
def close(self):
'''
Close the tcp socket.
'''
self.socket.close()
time.sleep(0.1)
def _make_mbap(self, length):
'''
make the modbus mbap
'''
return struct.pack(">3H", self.packet_id, 0, length)
def interact(self, packet):
'''
interact with the slave device
'''
self.socket.send(self._make_mbap(len(packet)) + packet)
mbap_raw = self.socket.recv(6)
if len(mbap_raw) == 0:
raise ModbusError("The controller did not respond to the request (MBAP length = 0)")
if len(mbap_raw) != 6:
ttp = (len(mbap_raw), mbap_raw)
raise ModbusError("MBAP length error; expected:6, got:%s (%r)" % ttp)
mbap = struct.unpack('>3H', mbap_raw)
body = self.socket.recv(mbap[2])
if mbap[0] != self.packet_id:
ttp = (self.packet_id, mbap[0], mbap_raw)
raise ModbusError("MBAP id error; expected:%r, got:%r (%r)" % ttp)
#self.packet_id = self.packet_id + 1 if self.packet_id < 65535 else 0
return body
if __name__ == '__main__':
pkt = [
{'register':2782, 'address':1, 'type':'holding_float', 'count':1, 'low_word_first':True, 'scalar':1}
]
tst = ModbusRTU(1, 3, baud=38400, low_word_first=True)
tmp = tst.read_items(pkt)
for i in tmp:
print i # pylint: disable=E1601
| 34.260437
| 142
| 0.562409
|
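A standalone sketch of the request framing used above: building a function-3 (read holding registers) packet and its CRC16 without a serial port. It mirrors _make_packet and _calc_crc but is written for Python 3 bytes:
import struct
def make_read_holding(address, register, count):
    # Slave address, function code 3, start register, register count.
    return struct.pack(">BBHH", address, 3, register, count)
def crc16(data):
    # Same CRC16 (poly 0xA001) as _calc_crc, including the final byte swap.
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            lsb = crc & 1
            crc >>= 1
            if lsb:
                crc ^= 0xA001
    return ((crc % 256) << 8) + (crc >> 8)
packet = make_read_holding(address=1, register=2782, count=2)
frame = packet + struct.pack(">H", crc16(packet))
print(frame.hex())  # 8-byte frame; the swap above puts the CRC low byte first on the wire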
809d166ffc35ab942af097c6c3c5d1f46e052246
| 1,571
|
py
|
Python
|
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetElementEstimateRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetElementEstimateRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-companyreg/aliyunsdkcompanyreg/request/v20201022/GetElementEstimateRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcompanyreg.endpoint import endpoint_data
class GetElementEstimateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'companyreg', '2020-10-22', 'GetElementEstimate')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_BizId(self):
return self.get_query_params().get('BizId')
def set_BizId(self,BizId):
self.add_query_param('BizId',BizId)
| 35.704545
| 78
| 0.762572
|
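A minimal sketch of sending the request above with the core SDK client; the credentials, region, and parameter values are placeholders:
from aliyunsdkcore.client import AcsClient
from aliyunsdkcompanyreg.request.v20201022.GetElementEstimateRequest import GetElementEstimateRequest
# Placeholder credentials and region.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = GetElementEstimateRequest()
request.set_BizId('<biz-id>')   # placeholder business id
request.set_Period(12)          # placeholder period
response = client.do_action_with_exception(request)
print(response)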
ad05156989187a0fa93c37df148b12e1e53d8b36
| 1,149
|
py
|
Python
|
sysinfo.py
|
robDaglio/sysinfo
|
f7a2325b9e6815bb12cb439853c0d26dccab4782
|
[
"MIT"
] | null | null | null |
sysinfo.py
|
robDaglio/sysinfo
|
f7a2325b9e6815bb12cb439853c0d26dccab4782
|
[
"MIT"
] | null | null | null |
sysinfo.py
|
robDaglio/sysinfo
|
f7a2325b9e6815bb12cb439853c0d26dccab4782
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# ======================================================|
# Program Name: sysinfo
# Author: Rob Daglio
# Last Updated: 10_11_2019
# Description: Simple system information gathering script
#========================================================|
import os
from platform import uname
from time import sleep
def system_info():
criteria = ['[*] Platform: ',
'[*] System Name: ',
'[*] Kernel Version: ',
'[*] Kernel Details: ',
'[*] Architecture: ',
'[*] Processor: ',]
print("\n|===================| System Information |====================|\n")
for index, item in enumerate(uname()):
if item == "":
            print(f"{criteria[index]}n/a")
else:
print(f"{criteria[index]}{item}")
print("\n|=============================================================|\n")
def check_os():
if os.name == 'posix' or os.name == 'linux':
os.system('clear')
sleep(1)
system_info()
elif os.name == ('nt'):
os.system('cls')
sleep(1)
system_info()
else:
pass
if __name__ == '__main__':
check_os()
| 23.9375
| 80
| 0.440383
|
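The label list above maps positionally onto the fields of platform.uname(); a tiny sketch of that mapping:
from platform import uname
labels = ['Platform', 'System Name', 'Kernel Version',
          'Kernel Details', 'Architecture', 'Processor']
info = uname()  # fields: system, node, release, version, machine, processor
for label, value in zip(labels, info):
    print('[*] {}: {}'.format(label, value or 'n/a'))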
fb8a9bd1c1c4b066ec3f882d94544d5192d16f96
| 54,760
|
py
|
Python
|
tests/contract/test_resource_client.py
|
WaelA/cloudformation-cli
|
9a2c6a357036286c04fc8585469ddbae9220df38
|
[
"Apache-2.0"
] | null | null | null |
tests/contract/test_resource_client.py
|
WaelA/cloudformation-cli
|
9a2c6a357036286c04fc8585469ddbae9220df38
|
[
"Apache-2.0"
] | null | null | null |
tests/contract/test_resource_client.py
|
WaelA/cloudformation-cli
|
9a2c6a357036286c04fc8585469ddbae9220df38
|
[
"Apache-2.0"
] | null | null | null |
# fixture and parameter have the same name
# pylint: disable=redefined-outer-name,protected-access
import logging
import time
from io import StringIO
from unittest.mock import ANY, patch
import pytest
from rpdk.core.boto_helpers import LOWER_CAMEL_CRED_KEYS
from rpdk.core.contract.interface import Action, HandlerErrorCode, OperationStatus
from rpdk.core.contract.resource_client import (
ResourceClient,
override_properties,
prune_properties,
prune_properties_from_model,
prune_properties_if_not_exist_in_path,
prune_properties_which_dont_exist_in_path,
)
from rpdk.core.exceptions import InvalidProjectError
from rpdk.core.test import (
DEFAULT_ENDPOINT,
DEFAULT_FUNCTION,
DEFAULT_REGION,
empty_override,
)
EMPTY_OVERRIDE = empty_override()
ACCOUNT = "11111111"
LOG = logging.getLogger(__name__)
SCHEMA = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
"primaryIdentifier": ["/properties/c"],
"writeOnlyProperties": ["/properties/d"],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
SCHEMA_WITH_MULTIPLE_WRITE_PROPERTIES = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
"primaryIdentifier": ["/properties/c"],
"writeOnlyProperties": ["/properties/d", "/properties/a"],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
SCHEMA_ = {
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
"primaryIdentifier": ["/properties/c"],
"writeOnlyProperties": ["/properties/d"],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
SCHEMA_WITH_NESTED_PROPERTIES = {
"properties": {
"a": {"type": "string"},
"g": {"type": "number"},
"b": {"$ref": "#/definitions/c"},
"f": {
"type": "array",
"items": {"$ref": "#/definitions/c"},
},
"h": {
"type": "array",
"insertionOrder": "false",
"items": {"$ref": "#/definitions/c"},
},
"i": {
"type": "array",
"insertionOrder": "false",
"items": "string",
},
},
"definitions": {
"c": {
"type": "object",
"properties": {"d": {"type": "integer"}, "e": {"type": "integer"}},
}
},
"readOnlyProperties": ["/properties/a"],
"primaryIdentifier": ["/properties/a"],
"writeOnlyProperties": ["/properties/g"],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
SCHEMA_WITH_COMPOSITE_KEY = {
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
},
"readOnlyProperties": ["/properties/d"],
"createOnlyProperties": ["/properties/c"],
"primaryIdentifier": ["/properties/c", "/properties/d"],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
SCHEMA_WITH_ADDITIONAL_IDENTIFIERS = {
"properties": {
"a": {"type": "number"},
"b": {"type": "number"},
"c": {"type": "number"},
"d": {"type": "number"},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
"primaryIdentifier": ["/properties/c"],
"additionalIdentifiers": [["/properties/b"]],
"handlers": {"create": {}, "delete": {}, "read": {}},
}
EMPTY_SCHEMA = {"handlers": {"create": [], "delete": [], "read": []}}
@pytest.fixture
def resource_client():
endpoint = "https://"
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION, endpoint, DEFAULT_REGION, EMPTY_SCHEMA, EMPTY_OVERRIDE
)
mock_sesh.client.assert_called_once_with("lambda", endpoint_url=endpoint)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client._function_name == DEFAULT_FUNCTION
assert client._schema == EMPTY_SCHEMA
assert client._overrides == EMPTY_OVERRIDE
assert client.account == ACCOUNT
return client
@pytest.fixture
def resource_client_no_handler():
endpoint = "https://"
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION, endpoint, DEFAULT_REGION, {}, EMPTY_OVERRIDE
)
mock_sesh.client.assert_called_once_with("lambda", endpoint_url=endpoint)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client._function_name == DEFAULT_FUNCTION
assert client._schema == {}
assert client._overrides == EMPTY_OVERRIDE
assert client.account == ACCOUNT
return client
@pytest.fixture
def resource_client_inputs():
endpoint = "https://"
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION,
endpoint,
DEFAULT_REGION,
EMPTY_SCHEMA,
EMPTY_OVERRIDE,
{"CREATE": {"a": 1}, "UPDATE": {"a": 2}, "INVALID": {"b": 2}},
)
mock_sesh.client.assert_called_once_with("lambda", endpoint_url=endpoint)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client._function_name == DEFAULT_FUNCTION
assert client._schema == EMPTY_SCHEMA
assert client._overrides == EMPTY_OVERRIDE
assert client.account == ACCOUNT
return client
@pytest.fixture(params=[SCHEMA_, SCHEMA_WITH_ADDITIONAL_IDENTIFIERS])
def resource_client_inputs_schema(request):
endpoint = "https://"
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION,
endpoint,
DEFAULT_REGION,
request.param,
EMPTY_OVERRIDE,
{
"CREATE": {"a": 111, "c": 2, "d": 3},
"UPDATE": {"a": 1, "c": 2},
"INVALID": {"c": 3},
},
)
mock_sesh.client.assert_called_once_with("lambda", endpoint_url=endpoint)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client._function_name == DEFAULT_FUNCTION
assert client._schema == request.param
assert client._overrides == EMPTY_OVERRIDE
assert client.account == ACCOUNT
return client
@pytest.fixture
def resource_client_inputs_composite_key():
endpoint = "https://"
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION,
endpoint,
DEFAULT_REGION,
SCHEMA_WITH_COMPOSITE_KEY,
EMPTY_OVERRIDE,
{
"CREATE": {"a": 111, "c": 2},
"UPDATE": {"a": 1, "c": 2},
"INVALID": {"c": 3},
},
)
mock_sesh.client.assert_called_once_with("lambda", endpoint_url=endpoint)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client._function_name == DEFAULT_FUNCTION
assert client._schema == SCHEMA_WITH_COMPOSITE_KEY
assert client._overrides == EMPTY_OVERRIDE
assert client.account == ACCOUNT
return client
def test_prune_properties():
document = {
"foo": "bar",
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
prune_properties(document, [("foo",), ("spam",), ("not_found",), ("array", "1")])
assert document == {"one": "two", "array": ["first"]}
def test_prune_properties_for_all_sequence_members():
document: dict = {
"foo": "bar",
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
prune_properties(
document,
[
("foo",), # prune foo: bar
("spam",), # prune spam: eggs
("not_found",), # missing members are fine
(
"not_found", # missing sequences are fine
"*",
),
(
"array", # prune members of sequence "array"
"*",
),
],
)
assert document == {"one": "two", "array": []}
def test_prune_properties_nested_sequence():
document: dict = {
"array": [
{
"outer1": {"inner1": "valueA", "inner2": "valueA"},
"outer2": ["valueA", "valueB"],
},
{
"outer1": {"inner1": "valueB", "inner2": "valueB"},
"outer2": ["valueC", "valueD"],
},
],
}
prune_properties(
document,
[
(
"not_found",
"*",
"not_found",
"*",
),
(
"array",
"*",
"outer1",
"inner1",
),
(
"array",
"*",
"outer2",
"*",
),
],
)
assert document == {
"array": [
{"outer1": {"inner2": "valueA"}, "outer2": []},
{"outer1": {"inner2": "valueB"}, "outer2": []},
]
}
def test_prune_properties_nested_sequence_2():
document: dict = {
"array": [
{
"array2": [{"i1": "A", "i2": "B"}, {"i1": "C", "i2": "D"}],
"outer1": {"inner1": "valueA", "inner2": "valueA"},
"outer2": ["valueA", "valueB"],
},
{
"array2": [{"i1": "E", "i2": "F"}, {"i1": "G", "i2": "H"}],
"outer1": {"inner1": "valueB", "inner2": "valueB"},
"outer2": ["valueC", "valueD"],
},
],
}
prune_properties(
document,
[
(
"not_found",
"*",
"not_found",
"*",
),
(
"array",
"*",
"outer1",
"inner1",
),
(
"array",
"*",
"outer2",
"*",
),
(
"array",
"1",
"1",
"i1",
),
],
)
assert document == {
"array": [
{
"array2": [{"i1": "A", "i2": "B"}, {"i1": "C", "i2": "D"}],
"outer1": {"inner2": "valueA"},
"outer2": [],
},
{
"array2": [{"i1": "E", "i2": "F"}, {"i1": "G", "i2": "H"}],
"outer1": {"inner2": "valueB"},
"outer2": [],
},
]
}
def test_prune_properties_specific_sequence_indices():
document: dict = {
"array": [
{
"outer1": {"inner1": "valueA", "inner2": "valueA"},
"outer2": ["valueA", "valueB"],
},
{
"outer1": {"inner1": "valueB", "inner2": "valueB"},
"outer2": ["valueC", "valueD"],
},
],
}
prune_properties(
document,
[
(
"array",
"0",
"outer1",
"inner1",
),
(
"array",
"1",
"outer2",
"1",
),
],
)
assert document == {
"array": [
{"outer1": {"inner2": "valueA"}, "outer2": ["valueA", "valueB"]},
{"outer1": {"inner1": "valueB", "inner2": "valueB"}, "outer2": ["valueC"]},
]
}
def test_prune_properties_from_model():
document = {
"foo": "bar",
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
prune_properties_from_model(
document,
[
("properties", "foo"),
("properties", "spam"),
("properties", "not_found"),
("properties", "array", "1"),
],
)
assert document == {"one": "two", "array": ["first"]}
def test_prune_properties_if_not_exist_in_path():
previous_model = {
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
model = {
"foo": "bar",
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
model = prune_properties_if_not_exist_in_path(
model,
previous_model,
[
("properties", "foo"),
("properties", "spam"),
("properties", "array", "1"),
("properties", "invalid"),
],
)
assert model == previous_model
def test_prune_properties_which_dont_exist_in_path():
model = {
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
model1 = prune_properties_which_dont_exist_in_path(
model,
[
("properties", "one"),
],
)
assert model1 == {"one": "two"}
def test_init_sam_cli_client():
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
with patch_sesh as mock_create_sesh, patch_creds as mock_creds:
with patch_account as mock_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
client = ResourceClient(
DEFAULT_FUNCTION, DEFAULT_ENDPOINT, DEFAULT_REGION, {}, EMPTY_OVERRIDE
)
mock_sesh.client.assert_called_once_with(
"lambda", endpoint_url=DEFAULT_ENDPOINT, use_ssl=False, verify=False, config=ANY
)
mock_creds.assert_called_once_with(mock_sesh, LOWER_CAMEL_CRED_KEYS, None)
mock_account.assert_called_once_with(mock_sesh, {})
assert client.account == ACCOUNT
def test_generate_token():
token = ResourceClient.generate_token()
assert isinstance(token, str)
assert len(token) == 36
@pytest.mark.parametrize("resource_type", [None, "Org::Srv::Type"])
@pytest.mark.parametrize("log_group_name", [None, "random_name"])
@pytest.mark.parametrize(
"log_creds",
[
{},
{
"AccessKeyId": object(),
"SecretAccessKey": object(),
"SessionToken": object(),
},
],
)
def test_make_request(resource_type, log_group_name, log_creds):
desired_resource_state = object()
previous_resource_state = object()
token = object()
request = ResourceClient.make_request(
desired_resource_state,
previous_resource_state,
"us-east-1",
ACCOUNT,
"CREATE",
{},
resource_type,
log_group_name,
log_creds,
token,
)
expected_request = {
"requestData": {
"callerCredentials": {},
"resourceProperties": desired_resource_state,
"previousResourceProperties": previous_resource_state,
"logicalResourceId": token,
"typeConfiguration": None,
},
"region": DEFAULT_REGION,
"awsAccountId": ACCOUNT,
"action": "CREATE",
"bearerToken": token,
"callbackContext": None,
"resourceType": resource_type,
}
if log_group_name and log_creds:
expected_request["requestData"]["providerCredentials"] = log_creds
expected_request["requestData"]["providerLogGroupName"] = log_group_name
assert request == expected_request
def test_get_metadata(resource_client):
schema = {
"properties": {
"a": {"type": "array", "const": 1, "insertionOrder": "true"},
"b": {"type": "number", "const": 2, "insertionOrder": "false"},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/c"],
"createOnlyProperties": ["/properties/d"],
}
resource_client._update_schema(schema)
assert resource_client.get_metadata() == {"b"}
def test_update_schema(resource_client):
resource_client._strategy = object()
schema = {
"primaryIdentifier": ["/properties/a"],
"readOnlyProperties": ["/properties/b"],
"writeOnlyProperties": ["/properties/c"],
"createOnlyProperties": ["/properties/d"],
}
resource_client._update_schema(schema)
assert resource_client._schema is schema
assert resource_client._strategy is None
assert resource_client.primary_identifier_paths == {("properties", "a")}
assert resource_client.read_only_paths == {("properties", "b")}
assert resource_client.write_only_paths == {("properties", "c")}
assert resource_client.create_only_paths == {("properties", "d")}
def test_strategy(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/c"],
"createOnlyProperties": ["/properties/d"],
}
resource_client._update_schema(schema)
assert resource_client._schema is schema
assert resource_client._strategy is None
strategy = resource_client.strategy
assert resource_client._strategy is strategy
assert strategy.example() == {"a": 1, "b": 2, "d": 4}
cached = resource_client.strategy
assert cached is strategy
assert resource_client._strategy is strategy
def test_invalid_strategy(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/c"],
"createOnlyProperties": ["/properties/d"],
}
resource_client._update_schema(schema)
assert resource_client._schema is schema
assert resource_client._strategy is None
invalid_strategy = resource_client.invalid_strategy
assert resource_client._invalid_strategy is invalid_strategy
assert invalid_strategy.example() == {"a": 1, "b": 2, "c": 3, "d": 4}
cached = resource_client.invalid_strategy
assert cached is invalid_strategy
assert resource_client._invalid_strategy is invalid_strategy
def test_update_strategy(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
"d": {"type": "number", "const": 4},
},
"readOnlyProperties": ["/properties/c"],
"createOnlyProperties": ["/properties/d"],
}
resource_client._update_schema(schema)
assert resource_client._schema is schema
assert resource_client._update_strategy is None
update_strategy = resource_client.update_strategy
assert resource_client._update_strategy is update_strategy
assert update_strategy.example() == {"a": 1, "b": 2}
cached = resource_client.update_strategy
assert cached is update_strategy
assert resource_client._update_strategy is update_strategy
def test_generate_create_example(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
},
"readOnlyProperties": ["/properties/b"],
}
resource_client._update_schema(schema)
example = resource_client.generate_create_example()
assert example == {"a": 1}
def test_generate_invalid_create_example(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
},
"readOnlyProperties": ["/properties/b"],
}
resource_client._update_schema(schema)
example = resource_client.generate_invalid_create_example()
assert example == {"a": 1, "b": 2}
def test_generate_update_example(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
}
resource_client._update_schema(schema)
resource_client._overrides = {}
model_from_created_resource = {"b": 2, "a": 4}
example = resource_client.generate_update_example(model_from_created_resource)
assert example == {"a": 1, "b": 2}
def test_generate_invalid_update_example(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
}
resource_client._update_schema(schema)
resource_client._overrides = {}
model_from_created_resource = {"b": 2, "a": 4}
example = resource_client.generate_invalid_update_example(
model_from_created_resource
)
assert example == {"a": 1, "b": 2, "c": 3}
def test_generate_update_example_update_override(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
}
resource_client._update_schema(schema)
overrides = {"UPDATE": {"a": 2}, "CREATE": {"a": 5}}
resource_client._overrides = overrides
model_from_created_resource = {"b": 2, "a": 4}
example = resource_client.generate_update_example(model_from_created_resource)
assert example == {"a": 2, "b": 2}
def test_generate_update_example_create_override(resource_client):
schema = {
"properties": {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
},
"readOnlyProperties": ["/properties/b"],
"createOnlyProperties": ["/properties/c"],
}
resource_client._update_schema(schema)
overrides = {"CREATE": {"a": 5}}
resource_client._overrides = overrides
model_from_created_resource = {"b": 2, "a": 4}
example = resource_client.generate_update_example(model_from_created_resource)
assert example == {"a": 5, "b": 2}
def test_has_only_writable_identifiers_primary_is_read_only(resource_client):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo"],
"readOnlyProperties": ["/properties/foo"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_primary_is_writable(resource_client):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo"],
"createOnlyProperties": ["/properties/foo"],
}
)
assert resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_primary_and_additional_are_read_only(
resource_client,
):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo"],
"additionalIdentifiers": [["/properties/bar"]],
"readOnlyProperties": ["/properties/foo", "/properties/bar"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_additional_is_writable(resource_client):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo"],
"additionalIdentifiers": [["/properties/bar"]],
"readOnlyProperties": ["/properties/foo"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_compound_is_writable(resource_client):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo"],
"additionalIdentifiers": [["/properties/bar", "/properties/baz"]],
"readOnlyProperties": ["/properties/foo", "/properties/baz"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_composite_primary_are_read_only(
resource_client,
):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo", "/properties/bar"],
"readOnlyProperties": ["/properties/foo", "/properties/bar"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_composite_primary_is_read_only(
resource_client,
):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo", "/properties/bar"],
"readOnlyProperties": ["/properties/foo"],
"createOnlyProperties": ["/properties/bar"],
}
)
assert not resource_client.has_only_writable_identifiers()
def test_has_only_writable_identifiers_composite_primary_are_writable(
resource_client,
):
resource_client._update_schema(
{
"primaryIdentifier": ["/properties/foo", "/properties/bar"],
"createOnlyProperties": ["/properties/foo", "/properties/bar"],
}
)
assert resource_client.has_only_writable_identifiers()
def test_make_payload(resource_client):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
token = "ecba020e-b2e6-4742-a7d0-8a06ae7c4b2f"
with patch.object(
resource_client, "generate_token", return_value=token
), patch_creds:
payload = resource_client._make_payload("CREATE", {"foo": "bar"})
assert payload == {
"requestData": {
"callerCredentials": {},
"resourceProperties": {"foo": "bar"},
"previousResourceProperties": None,
"logicalResourceId": token,
"typeConfiguration": None,
},
"region": DEFAULT_REGION,
"awsAccountId": ACCOUNT,
"action": "CREATE",
"bearerToken": token,
"callbackContext": None,
"resourceType": None,
}
@pytest.mark.parametrize("action", [Action.READ, Action.LIST])
def test_call_sync(resource_client, action):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client = resource_client._client
mock_client.invoke.return_value = {"Payload": StringIO('{"status": "SUCCESS"}')}
with patch_creds:
status, response = resource_client.call(action, {"resourceModel": SCHEMA})
assert status == OperationStatus.SUCCESS
assert response == {"status": OperationStatus.SUCCESS.value}
def test_call_docker():
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
patch_docker = patch("rpdk.core.contract.resource_client.docker", autospec=True)
with patch_sesh as mock_create_sesh, patch_docker as mock_docker, patch_creds:
with patch_account:
mock_client = mock_docker.from_env.return_value
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
resource_client = ResourceClient(
DEFAULT_FUNCTION,
"url",
DEFAULT_REGION,
{},
EMPTY_OVERRIDE,
docker_image="docker_image",
executable_entrypoint="entrypoint",
)
response_str = (
"__CFN_RESOURCE_START_RESPONSE__"
'{"status": "SUCCESS"}__CFN_RESOURCE_END_RESPONSE__'
)
mock_client.containers.run.return_value = str.encode(response_str)
with patch_creds:
status, response = resource_client.call("CREATE", {"resourceModel": SCHEMA})
mock_client.containers.run.assert_called_once()
assert status == OperationStatus.SUCCESS
assert response == {"status": OperationStatus.SUCCESS.value}
def test_call_docker_executable_entrypoint_null():
patch_sesh = patch(
"rpdk.core.contract.resource_client.create_sdk_session", autospec=True
)
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
patch_account = patch(
"rpdk.core.contract.resource_client.get_account",
autospec=True,
return_value=ACCOUNT,
)
patch_docker = patch("rpdk.core.contract.resource_client.docker", autospec=True)
with patch_sesh as mock_create_sesh, patch_docker, patch_creds:
with patch_account:
mock_sesh = mock_create_sesh.return_value
mock_sesh.region_name = DEFAULT_REGION
resource_client = ResourceClient(
DEFAULT_FUNCTION,
"url",
DEFAULT_REGION,
{},
EMPTY_OVERRIDE,
docker_image="docker_image",
)
try:
with patch_creds:
resource_client.call("CREATE", {"resourceModel": SCHEMA})
except InvalidProjectError:
pass
@pytest.mark.parametrize("action", [Action.CREATE, Action.UPDATE, Action.DELETE])
def test_call_async(resource_client, action):
mock_client = resource_client._client
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client.invoke.side_effect = [
{"Payload": StringIO('{"status": "IN_PROGRESS", "resourceModel": {"c": 3} }')},
{"Payload": StringIO('{"status": "SUCCESS"}')},
]
with patch_creds:
status, response = resource_client.call(action, {})
assert status == OperationStatus.SUCCESS
assert response == {"status": OperationStatus.SUCCESS.value}
@pytest.mark.parametrize("action", [Action.CREATE, Action.UPDATE, Action.DELETE])
def test_call_async_write_only_properties_are_removed(resource_client, action):
mock_client = resource_client._client
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client.invoke.side_effect = [
{
"Payload": StringIO(
'{"status": "SUCCESS", "resourceModel": {"c": 3, "d": 4} }'
)
}
]
resource_client._update_schema(SCHEMA)
with pytest.raises(AssertionError), patch_creds:
resource_client.call(action, {})
@pytest.mark.parametrize("action", [Action.CREATE, Action.UPDATE, Action.DELETE])
def test_call_async_write_only_properties_are_not_removed_for_in_progress(
resource_client, action
):
mock_client = resource_client._client
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client.invoke.side_effect = [
{
"Payload": StringIO(
'{"status": "IN_PROGRESS", "resourceModel": {"c": 3, "d": 4} }'
)
},
{"Payload": StringIO('{"status": "SUCCESS"}')},
]
resource_client._update_schema(SCHEMA)
with patch_creds:
resource_client.call(action, {})
def test_call_and_assert_success(resource_client):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client = resource_client._client
mock_client.invoke.return_value = {"Payload": StringIO('{"status": "SUCCESS"}')}
with patch_creds:
status, response, error_code = resource_client.call_and_assert(
Action.CREATE, OperationStatus.SUCCESS, {}, None
)
assert status == OperationStatus.SUCCESS
assert response == {"status": OperationStatus.SUCCESS.value}
assert error_code is None
def test_call_and_assert_fails(resource_client_no_handler):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
with patch_creds:
try:
resource_client_no_handler.call_and_assert(
Action.CREATE, OperationStatus.SUCCESS, {}, None
)
except ValueError:
LOG.debug(
"Value Error Exception is expected when required CRD handlers are not present"
)
def test_call_and_assert_failed_invalid_payload(resource_client):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client = resource_client._client
mock_client.invoke.return_value = {"Payload": StringIO("invalid json document")}
with pytest.raises(ValueError), patch_creds:
status, response, error_code = resource_client.call_and_assert(
Action.CREATE, OperationStatus.SUCCESS, {}, None
)
def test_call_and_assert_failed(resource_client):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client = resource_client._client
mock_client.invoke.return_value = {
"Payload": StringIO('{"status": "FAILED","errorCode": "NotFound"}')
}
with patch_creds:
status, response, error_code = resource_client.call_and_assert(
Action.DELETE, OperationStatus.FAILED, {}, None
)
assert status == OperationStatus.FAILED
assert response == {"status": OperationStatus.FAILED.value, "errorCode": "NotFound"}
assert error_code == HandlerErrorCode.NotFound
def test_call_and_assert_exception_unsupported_status(resource_client):
mock_client = resource_client._client
mock_client.invoke.return_value = {
"Payload": StringIO('{"status": "FAILED","errorCode": "NotFound"}')
}
with pytest.raises(ValueError):
resource_client.call_and_assert(Action.DELETE, "OtherStatus", {}, None)
def test_call_and_assert_exception_assertion_mismatch(resource_client):
patch_creds = patch(
"rpdk.core.contract.resource_client.get_temporary_credentials",
autospec=True,
return_value={},
)
mock_client = resource_client._client
mock_client.invoke.return_value = {"Payload": StringIO('{"status": "SUCCESS"}')}
with pytest.raises(AssertionError), patch_creds:
resource_client.call_and_assert(Action.CREATE, OperationStatus.FAILED, {}, None)
@pytest.mark.parametrize("status", [OperationStatus.SUCCESS, OperationStatus.FAILED])
def test_assert_in_progress_wrong_status(status):
with pytest.raises(AssertionError):
ResourceClient.assert_in_progress(status, {})
def test_assert_in_progress_error_code_set():
with pytest.raises(AssertionError):
ResourceClient.assert_in_progress(
OperationStatus.IN_PROGRESS,
{"errorCode": HandlerErrorCode.AccessDenied.value},
)
def test_assert_in_progress_resource_models_set():
with pytest.raises(AssertionError):
ResourceClient.assert_in_progress(
OperationStatus.IN_PROGRESS, {"resourceModels": []}
)
def test_assert_in_progress_callback_delay_seconds_unset():
callback_delay_seconds = ResourceClient.assert_in_progress(
OperationStatus.IN_PROGRESS, {"resourceModels": None}
)
assert callback_delay_seconds == 0
def test_assert_in_progress_callback_delay_seconds_set():
callback_delay_seconds = ResourceClient.assert_in_progress(
OperationStatus.IN_PROGRESS, {"callbackDelaySeconds": 5}
)
assert callback_delay_seconds == 5
@pytest.mark.parametrize(
"status", [OperationStatus.IN_PROGRESS, OperationStatus.FAILED]
)
def test_assert_success_wrong_status(status):
with pytest.raises(AssertionError):
ResourceClient.assert_success(status, {})
def test_assert_success_error_code_set():
with pytest.raises(AssertionError):
ResourceClient.assert_success(
OperationStatus.SUCCESS, {"errorCode": HandlerErrorCode.AccessDenied.value}
)
def test_assert_success_callback_delay_seconds_set():
with pytest.raises(AssertionError):
ResourceClient.assert_success(
OperationStatus.SUCCESS, {"callbackDelaySeconds": 5}
)
@pytest.mark.parametrize(
"status", [OperationStatus.IN_PROGRESS, OperationStatus.SUCCESS]
)
def test_assert_failed_wrong_status(status):
with pytest.raises(AssertionError):
ResourceClient.assert_failed(status, {})
def test_assert_failed_error_code_unset():
with pytest.raises(AssertionError):
ResourceClient.assert_failed(OperationStatus.FAILED, {})
def test_assert_failed_error_code_invalid():
with pytest.raises(KeyError):
ResourceClient.assert_failed(OperationStatus.FAILED, {"errorCode": "XXX"})
def test_assert_failed_callback_delay_seconds_set():
with pytest.raises(AssertionError):
ResourceClient.assert_failed(
OperationStatus.FAILED,
{
"errorCode": HandlerErrorCode.AccessDenied.value,
"callbackDelaySeconds": 5,
},
)
def test_assert_failed_resource_models_set():
with pytest.raises(AssertionError):
ResourceClient.assert_failed(
OperationStatus.FAILED,
{"errorCode": HandlerErrorCode.AccessDenied.value, "resourceModels": []},
)
def test_assert_failed_returns_error_code():
error_code = ResourceClient.assert_failed(
OperationStatus.FAILED, {"errorCode": HandlerErrorCode.AccessDenied.value}
)
assert error_code == HandlerErrorCode.AccessDenied
def test_override_properties():
document = {
"foo": "bar",
"spam": "eggs",
"one": "two",
"array": ["first", "second"],
}
override_properties(
document,
{("foo",): "baz", ("spam",): {}, ("not_found",): None, ("array", "1"): "last"},
)
assert document == {
"foo": "baz",
"spam": {},
"one": "two",
"array": ["first", "last"],
}
def test_has_update_handler(resource_client):
schema = {"handlers": {"update": {"permissions": ["permission"]}}}
resource_client._update_schema(schema)
assert resource_client.has_update_handler()
@pytest.mark.parametrize("action", [Action.CREATE, Action.UPDATE, Action.DELETE])
def test_assert_CUD_time(resource_client, action):
resource_client.assert_time(time.time() - 59, time.time(), action)
@pytest.mark.parametrize("action", [Action.READ, Action.LIST])
def test_assert_RL_time(resource_client, action):
resource_client.assert_time(time.time() - 29, time.time(), action)
@pytest.mark.parametrize("action", [Action.CREATE, Action.UPDATE, Action.DELETE])
def test_assert_CUD_time_fail(resource_client, action):
with pytest.raises(AssertionError):
resource_client.assert_time(time.time() - 61, time.time(), action)
@pytest.mark.parametrize("action", [Action.READ, Action.LIST])
def test_assert_RL_time_fail(resource_client, action):
with pytest.raises(AssertionError):
resource_client.assert_time(time.time() - 31, time.time(), action)
def test_assert_primary_identifier_success(resource_client):
resource_client._update_schema(SCHEMA)
resource_client.assert_primary_identifier(
resource_client.primary_identifier_paths, {"a": 1, "b": 2, "c": 3}
)
def test_assert_primary_identifier_fail(resource_client):
with pytest.raises(AssertionError):
resource_client._update_schema(SCHEMA)
resource_client.assert_primary_identifier(
resource_client.primary_identifier_paths, {"a": 1, "b": 2}
)
def test_is_primary_identifier_equal_success(resource_client):
resource_client._update_schema(SCHEMA)
assert resource_client.is_primary_identifier_equal(
resource_client.primary_identifier_paths,
{"a": 1, "b": 2, "c": 3},
{"a": 1, "b": 2, "c": 3},
)
def test_is_primary_identifier_equal_fail(resource_client):
resource_client._update_schema(SCHEMA)
assert not resource_client.is_primary_identifier_equal(
resource_client.primary_identifier_paths,
{"a": 1, "b": 2, "c": 3},
{"a": 1, "b": 2, "c": 4},
)
def test_is_primary_identifier_equal_fail_key(resource_client):
with pytest.raises(AssertionError):
resource_client._update_schema(SCHEMA)
resource_client.is_primary_identifier_equal(
resource_client.primary_identifier_paths,
{"a": 1, "b": 2},
{"a": 1, "b": 2},
)
def test_assert_write_only_property_does_not_exist(resource_client):
schema = {
"a": {"type": "number", "const": 1},
"b": {"type": "number", "const": 2},
"c": {"type": "number", "const": 3},
}
resource_client._update_schema(schema)
resource_client.assert_write_only_property_does_not_exist(schema)
@pytest.mark.parametrize("schema", [SCHEMA, SCHEMA_WITH_MULTIPLE_WRITE_PROPERTIES])
def test_assert_write_only_property_does_not_exist_success(resource_client, schema):
created_resource = {"a": None, "b": 2, "c": 3}
resource_client._update_schema(schema)
resource_client.assert_write_only_property_does_not_exist(created_resource)
@pytest.mark.parametrize("schema", [SCHEMA, SCHEMA_WITH_MULTIPLE_WRITE_PROPERTIES])
def test_assert_write_only_property_does_not_exist_fail(resource_client, schema):
with pytest.raises(AssertionError):
created_resource = {"a": 1, "b": 2, "c": 3, "d": 4}
resource_client._update_schema(schema)
resource_client.assert_write_only_property_does_not_exist(created_resource)
def test_generate_create_example_with_inputs(resource_client_inputs):
assert resource_client_inputs.generate_create_example() == {"a": 1}
def test_generate_invalid_create_example_with_inputs(resource_client_inputs):
assert resource_client_inputs.generate_invalid_create_example() == {"b": 2}
def test_generate_update_example_with_inputs(resource_client_inputs):
assert resource_client_inputs.generate_update_example({"a": 1}) == {"a": 2}
def test_generate_invalid_update_example_with_inputs(resource_client_inputs):
assert resource_client_inputs.generate_invalid_update_example({"a": 1}) == {"b": 2}
def test_generate_update_example_with_primary_identifier(resource_client_inputs_schema):
created_resource = resource_client_inputs_schema.generate_create_example()
# adding read only property to denote a realistic scenario
created_resource["b"] = 2
updated_resource = resource_client_inputs_schema.generate_update_example(
created_resource
)
assert updated_resource == {"a": 1, "c": 2, "b": 2}
def test_generate_update_example_with_composite_key(
resource_client_inputs_composite_key,
):
created_resource = resource_client_inputs_composite_key.generate_create_example()
created_resource.update({"d": 3}) # mocking value of d as it is a readOnly property
updated_resource = resource_client_inputs_composite_key.generate_update_example(
created_resource
)
assert updated_resource == {"a": 1, "c": 2, "d": 3}
def test_compare_should_pass(resource_client):
resource_client._update_schema(SCHEMA_WITH_NESTED_PROPERTIES)
inputs = {
"b": {"d": 1},
"f": [{"d": 1}],
"h": [{"d": 1}, {"d": 2}],
"i": ["abc", "ghi"],
}
outputs = {
"b": {"d": 1, "e": 3},
"f": [{"d": 1, "e": 2}],
"h": [{"d": 1, "e": 3}, {"d": 2}],
"i": ["abc", "ghi"],
}
resource_client.compare(inputs, outputs)
def test_compare_should_throw_exception(resource_client):
resource_client._update_schema(SCHEMA_WITH_NESTED_PROPERTIES)
inputs = {"b": {"d": 1}, "f": [{"d": 1}], "h": [{"d": 1}], "z": 1}
outputs = {
"b": {"d": 1, "e": 2},
"f": [{"d": 1}],
"h": [{"d": 1}],
}
try:
resource_client.compare(inputs, outputs)
except AssertionError:
logging.debug("This test expects Assertion Exception to be thrown")
@pytest.mark.parametrize(
"inputs,outputs,schema_fragment",
[
(
{"CollectionToCompare": ["item1", "item2", "item3"]},
{"CollectionToCompare": ["item3", "item2", "item1"]},
{"properties": {"CollectionToCompare": {"insertionOrder": False}}},
),
(
{"CollectionToCompare": ["item1", "item2", "item3"]},
{"CollectionToCompare": ["item1", "item2", "item3"]},
{"properties": {"CollectionToCompare": {"insertionOrder": True}}},
),
(
{
"CollectionToCompare": [
"item1",
"item2",
"item3",
{"i": ["item1", "item2"]},
[
{"j1": {"z": {"l": 10}}, "k3": ["item5", "item4", "item1"]},
{"j": {"z": {"l": 10}}, "k": ["item4", "item3", "item2"]},
],
]
},
{
"CollectionToCompare": [
"item3",
"item2",
"item1",
{"i": ["item2", "item1"]},
[
{"j": {"k": ["item2", "item3", "item4"], "z": {"l": 10}}},
{"j1": {"k3": ["item1", "item5", "item4"], "z": {"l": 10}}},
],
]
},
{"properties": {"CollectionToCompare": {"insertionOrder": False}}},
),
(
{
"Collection": {
"PropertyA": {"A": True},
"CollectionToCompare": ["item1", "item2", "item3"],
}
},
{
"Collection": {
"PropertyA": {"A": True},
"CollectionToCompare": ["item3", "item2", "item1"],
}
},
{
"definitions": {
"PropertyA": {
"type": "object",
"additionalProperties": False,
"properties": {"A": {"type": "boolean"}},
},
"Collection": {
"type": "object",
"additionalProperties": False,
"properties": {
"PropertyA": {"$ref": "#/definitions/PropertyA"},
"CollectionToCompare": {
"insertionOrder": False,
"type": "array",
"items": {"type": "string", "minItems": 1},
},
},
},
},
"properties": {"Collection": {"$ref": "#/definitions/Collection"}},
},
),
(
{
"Collections": [
{
"InnerCollection": {
"Items": ["item2", "item1"],
"IntegerProperty": 10,
}
}
]
},
{
"Collections": [
{
"InnerCollection": {
"Items": ["item1", "item2"],
"IntegerProperty": 10,
}
}
]
},
{
"definitions": {
"InnerCollection": {
"type": "object",
"properties": {
"Items": {
"type": "array",
"insertionOrder": False,
"items": {"type": "string"},
},
"IntegerProperty": {"type": "integer"},
},
},
"Collection": {
"type": "object",
"properties": {
"InnerCollection": {"$ref": "#/definitions/InnerCollection"}
},
},
},
"properties": {
"Collections": {
"type": "array",
"uniqueItems": True,
"items": {"$ref": "#/definitions/Collection"},
},
},
},
),
],
)
def test_compare_collection(resource_client, inputs, outputs, schema_fragment):
resource_client._update_schema(schema_fragment)
resource_client.compare(inputs, outputs)
def test_compare_should_throw_key_error(resource_client):
resource_client._update_schema(SCHEMA_WITH_NESTED_PROPERTIES)
inputs = {"b": {"d": 1}, "f": [{"d": 1}], "h": [{"d": 1}]}
outputs = {"b": {"d": 1, "e": 2}, "f": [{"d": 1, "e": 2}, {"d": 2, "e": 3}]}
try:
resource_client.compare(inputs, outputs)
except AssertionError:
logging.debug("This test expects Assertion Exception to be thrown")
def test_compare_ordered_list_throws_assertion_exception(resource_client):
resource_client._update_schema(SCHEMA_WITH_NESTED_PROPERTIES)
inputs = {"b": {"d": 1}, "f": [{"d": 1}], "h": [{"d": 1}], "i": ["abc", "ghi"]}
outputs = {
"b": {"d": 1, "e": 2},
"f": [{"e": 2}, {"d": 2, "e": 3}],
"i": ["abc", "ghi", "tt"],
}
try:
resource_client.compare(inputs, outputs)
except AssertionError:
logging.debug("This test expects Assertion Exception to be thrown")
| 32.173913
| 94
| 0.583254
|
f50567f32bb9bea334374dca83fba1fd5825d693
| 13,847
|
py
|
Python
|
peitho/errors_and_parsers/abc_sysbio/abcsysbio_parser/Parser.py
|
MichaelPHStumpf/Peitho
|
a4daa9a3b2d8960079573d08d5baa019b5ac857e
|
[
"MIT"
] | 1
|
2018-01-05T21:59:49.000Z
|
2018-01-05T21:59:49.000Z
|
peitho/errors_and_parsers/abc_sysbio/abcsysbio_parser/Parser.py
|
MichaelPHStumpf/Peitho
|
a4daa9a3b2d8960079573d08d5baa019b5ac857e
|
[
"MIT"
] | null | null | null |
peitho/errors_and_parsers/abc_sysbio/abcsysbio_parser/Parser.py
|
MichaelPHStumpf/Peitho
|
a4daa9a3b2d8960079573d08d5baa019b5ac857e
|
[
"MIT"
] | 3
|
2018-01-05T22:00:09.000Z
|
2018-12-25T13:32:10.000Z
|
from numpy import *
from libsbml import *
import re
import os
from peitho.errors_and_parsers.abc_sysbio.abcsysbio.relations import *
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.CWriter import CWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.SDEPythonWriter import SDEPythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.ODEPythonWriter import ODEPythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.GillespiePythonWriter import GillespiePythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.SDECUDAWriter import SdeCUDAWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.ODECUDAWriter import OdeCUDAWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.GillespieCUDAWriter import GillespieCUDAWriter
class Parser:
def __init__(self, sbmlFileName, modelName, integrationType, method, inputPath="", outputPath=""):
c=re.compile('C', re.IGNORECASE)
py=re.compile('Python', re.I)
cuda=re.compile('CUDA', re.I)
gil=re.compile('Gillespie', re.I)
ode=re.compile('ODE', re.I)
sde=re.compile('SDE', re.I)
euler=re.compile('Euler', re.I)
heun=re.compile('Heun', re.I)
milstein=re.compile('Milstein', re.I)
if(cuda.search(integrationType)):
if(gil.search(integrationType)):
self.writer = GillespieCUDAWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(ode.search(integrationType)):
self.writer = OdeCUDAWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(sde.search(integrationType)):
self.writer = SdeCUDAWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(c.search(integrationType)):
self.writer = CWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(py.search(integrationType)):
if(gil.search(integrationType)):
self.writer = GillespiePythonWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(ode.search(integrationType)):
self.writer = ODEPythonWriter(sbmlFileName, modelName, inputPath, outputPath)
elif(sde.search(integrationType)):
self.writer = SDEPythonWriter(sbmlFileName, modelName, inputPath, outputPath)
reader = SBMLReader()
document = reader.readSBML(inputPath+sbmlFileName)
self.sbmlModel = document.getModel()
self.parameterId = []
self.listOfSpecies = [] #Used by the child
self.speciesId = []
self.product = []
self.reactant = []
self.S1 = []
self.S2 = []
self.listOfReactions = [] #Used by the child
self.listOfAssignmentRules = []
self.numLocalParameters = [] #Used by the child
self.comp = 0
self.parse()
if((py.search(integrationType) or cuda.search(integrationType)) and sde.search(integrationType)):
self.writer.write(method)
else:
self.writer.write()
def parse(self):
self.getBasicModelProperties()
self.writer.parsedModel.stoichiometricMatrix = empty([self.writer.parsedModel.numSpecies, self.writer.parsedModel.numReactions])
self.getCompartmentVolume()
def getBasicModelProperties(self):
self.writer.parsedModel.numSpecies = self.sbmlModel.getNumSpecies()
self.writer.parsedModel.numReactions = self.sbmlModel.getNumReactions()
self.writer.parsedModel.numGlobalParameters = self.sbmlModel.getNumParameters()
def getCompartmentVolume(self):
listOfCompartments = self.sbmlModel.getListOfCompartments()
for i in range(0, len(listOfCompartments)):
if listOfCompartments[i].isSetVolume():
self.comp = self.comp + 1
self.parameterId.append(listOfCompartments[i].getId())
self.writer.parsedModel.parameterId.append('compartment' + repr(i + 1))
self.writer.parsedModel.parameter.append(listOfCompartments[i].getVolume())
self.writer.parsedModel.listOfParameter.append(self.sbmlModel.getCompartment(i))
def getGlobalParameters(self):
#Differs between CUDA and Python/C
for i in range(0, self.writer.parsedModel.numGlobalParameters):
self.parameterId.append(self.sbmlModel.getParameter(i).getId())
self.writer.parsedModel.parameter.append(self.sbmlModel.getParameter(i).getValue())
self.writer.parsedModel.listOfParameter.append(self.sbmlModel.getParameter(i))
def getSpecies(self):
#Differs between CUDA and Python/C
self.listOfSpecies = self.sbmlModel.getListOfSpecies()
for k in range(0, len(self.listOfSpecies)):
self.writer.parsedModel.species.append(self.listOfSpecies[k])
self.speciesId.append(self.listOfSpecies[k].getId())
self.S1.append(0.0)
self.S2.append(0.0)
self.reactant.append(0)
self.product.append(0)
self.writer.parsedModel.initValues.append(self.getSpeciesValue(self.listOfSpecies[k]))#Only used by the python writer
def analyseModelStructure(self):
#Differs between CUDA and Python/C
reaction = []
numReactants = []
numProducts = []
self.listOfReactions = self.sbmlModel.getListOfReactions()
for i in range(0, len(self.listOfReactions)):
for a in range(0, len(self.writer.parsedModel.species)):
self.S1[a] = 0.0
self.S2[a] = 0.0
numReactants.append(self.listOfReactions[i].getNumReactants())
numProducts.append(self.listOfReactions[i].getNumProducts())
self.writer.parsedModel.kineticLaw.append(self.listOfReactions[i].getKineticLaw().getFormula())
self.numLocalParameters.append(self.listOfReactions[i].getKineticLaw().getNumParameters())
for j in range(0, numReactants[i]):
self.reactant[j] = self.listOfReactions[i].getReactant(j)
for k in range(0, len(self.writer.parsedModel.species)):
if(self.reactant[j].getSpecies() == self.writer.parsedModel.species[k].getId()):
self.S1[k] = self.reactant[j].getStoichiometry()
for l in range(0, numProducts[i]):
self.product[l] = self.listOfReactions[i].getProduct(l)
for k in range(0, len(self.writer.parsedModel.species)):
if(self.product[l].getSpecies() == self.writer.parsedModel.species[k].getId()):
self.S2[k] = self.product[l].getStoichiometry()
for m in range(0, len(self.writer.parsedModel.species)):
self.writer.parsedModel.stoichiometricMatrix[m][i] = -self.S1[m] + self.S2[m]
for n in range(0, self.numLocalParameters[i]):
self.writer.parsedModel.parameter.append(self.listOfReactions[i].getKineticLaw().getParameter(n).getValue())
self.writer.parsedModel.listOfParameter.append(self.listOfReactions[i].getKineticLaw().getParameter(n))
for n in range(0, self.comp):
name = self.parameterId[n]
new_name = 'compartment' + repr(n + 1)
node = self.sbmlModel.getReaction(i).getKineticLaw().getMath()
new_node = self.rename(node, name, new_name)
self.writer.parsedModel.kineticLaw[i] = formulaToString(new_node)
def analyseFunctions(self):
sbmlListOfFunctions = self.sbmlModel.getListOfFunctionDefinitions()
for fun in range(0, len(sbmlListOfFunctions)):
self.writer.parsedModel.listOfFunctions.append(sbmlListOfFunctions[fun])
self.writer.parsedModel.functionArgument.append([])
self.writer.parsedModel.functionBody.append(formulaToString(self.writer.parsedModel.listOfFunctions[fun].getBody()))
for funArg in range(0, self.writer.parsedModel.listOfFunctions[fun].getNumArguments()):
self.writer.parsedModel.functionArgument[fun].append(formulaToString(self.writer.parsedModel.listOfFunctions[fun].getArgument(funArg)))
name = self.writer.parsedModel.functionArgument[fun][funArg]
node = self.writer.parsedModel.listOfFunctions[fun].getBody()
new_node = self.rename(node, name, "a" + repr(funArg + 1))
self.writer.parsedModel.functionBody[fun] = formulaToString(new_node)
self.writer.parsedModel.functionArgument[fun][funArg] = "a" + repr(funArg + 1)
def analyseRules(self):
self.writer.parsedModel.listOfRules = self.sbmlModel.getListOfRules()
for rule in range(0, len(self.writer.parsedModel.listOfRules)):
self.writer.parsedModel.ruleFormula.append(self.writer.parsedModel.listOfRules[rule].getFormula())
self.writer.parsedModel.ruleVariable.append(self.writer.parsedModel.listOfRules[rule].getVariable())
def analyseEvents(self):
self.writer.parsedModel.listOfEvents = self.sbmlModel.getListOfEvents()
for event in range(0, len(self.writer.parsedModel.listOfEvents)):
self.writer.parsedModel.eventCondition.append(formulaToString(self.writer.parsedModel.listOfEvents[event].getTrigger().getMath()))
self.listOfAssignmentRules = self.writer.parsedModel.listOfEvents[event].getListOfEventAssignments()
self.writer.parsedModel.eventVariable.append([])
self.writer.parsedModel.eventFormula.append([])
for rule in range(0, len(self.listOfAssignmentRules)):
self.writer.parsedModel.eventVariable[event].append(self.listOfAssignmentRules[rule].getVariable())
self.writer.parsedModel.eventFormula[event].append(formulaToString(self.listOfAssignmentRules[rule].getMath()))
def renameEverything(self):
NAMES = [[], []]
NAMES[0].append(self.parameterId)
NAMES[0].append(self.writer.parsedModel.parameterId)
NAMES[1].append(self.speciesId)
NAMES[1].append(self.writer.parsedModel.speciesId)
for nam in range(0, 2):
for i in range(0, len(NAMES[nam][0])):
name = NAMES[nam][0][i]
new_name = NAMES[nam][1][i]
for k in range(0, self.writer.parsedModel.numReactions):
node = self.sbmlModel.getReaction(k).getKineticLaw().getMath()
new_node = self.rename(node, name, new_name)
self.writer.parsedModel.kineticLaw[k] = formulaToString(new_node)
for k in range(0, len(self.writer.parsedModel.listOfRules)):
node = self.writer.parsedModel.listOfRules[k].getMath()
new_node = self.rename(node, name, new_name)
self.writer.parsedModel.ruleFormula[k] = formulaToString(new_node)
if self.writer.parsedModel.ruleVariable[k] == name: self.writer.parsedModel.ruleVariable[k] = new_name
for k in range(0, len(self.writer.parsedModel.listOfEvents)):
node = self.writer.parsedModel.listOfEvents[k].getTrigger().getMath()
new_node = self.rename(node, name, new_name)
self.writer.parsedModel.eventCondition[k] = formulaToString(new_node)
self.listOfAssignmentRules = self.writer.parsedModel.listOfEvents[k].getListOfEventAssignments()
for cond in range(0, len(self.listOfAssignmentRules)):
node = self.listOfAssignmentRules[cond].getMath()
new_node = self.rename(node, name, new_name)
self.writer.parsedModel.eventFormula[k][cond] = formulaToString(new_node)
if self.writer.parsedModel.eventVariable[k][cond] == name: self.writer.parsedModel.eventVariable[k][cond] = new_name
def rename(self, node, name, new_name):
typ = node.getType()
if (typ == AST_NAME or typ == AST_NAME_TIME):
nme = node.getName()
if nme == name:
node.setName(new_name)
for n in range(0, node.getNumChildren()):
self.rename(node.getChild(n), name, new_name)
return node
def getSpeciesValue(self, specie):
if specie.isSetInitialAmount() and specie.isSetInitialConcentration():
return specie.getInitialConcentration() #The initial values are only used in ODE and SDE solvers so we take the concentration (if it was used in gillespie we would have taken the value)
if specie.isSetInitialAmount():
return specie.getInitialAmount()
else:
return specie.getInitialConcentration()
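# Illustrative usage sketch for Parser.rename; "example_parser" is a hypothetical,
# already-constructed Parser instance and the formula "k1 * S1" is an arbitrary example,
# neither is part of the original module.
def _rename_usage_sketch(example_parser):
    # rename() walks the libsbml AST and replaces every AST_NAME node whose
    # name matches the old identifier.
    node = parseFormula("k1 * S1")
    node = example_parser.rename(node, "k1", "parameter1")
    return formulaToString(node)  # roughly "parameter1 * S1"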
| 55.834677
| 205
| 0.611252
|
1f16df828caf389574d2479fae2f71787fc859b6
| 19,682
|
py
|
Python
|
aps/transform/utils.py
|
LvHang/aps
|
3e9c8b247e0526481970c28e8af1a6a93cc7f2cc
|
[
"Apache-2.0"
] | 5
|
2021-07-05T12:21:44.000Z
|
2021-11-23T08:09:45.000Z
|
aps/transform/utils.py
|
LvHang/aps
|
3e9c8b247e0526481970c28e8af1a6a93cc7f2cc
|
[
"Apache-2.0"
] | null | null | null |
aps/transform/utils.py
|
LvHang/aps
|
3e9c8b247e0526481970c28e8af1a6a93cc7f2cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON, TORCH_VERSION
from typing import Optional, Union, Tuple
if TORCH_VERSION >= 1.7:
from torch.fft import fft as fft_func
else:
pass
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
# W x B x 2
if TORCH_VERSION >= 1.7:
K = fft_func(th.eye(B) / S, dim=-1)
K = th.stack([K.real, K.imag], dim=-1)
else:
I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
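# Minimal sketch of how the kernel builder above is typically combined with
# init_window; the frame length and hop values are arbitrary example choices.
def _init_kernel_usage_sketch():
    # 400-sample frames are rounded up to a 512-point FFT, so the analysis
    # kernel comes back as (2 * 512) x 1 x 512 and the window is zero-padded
    # to 512 samples.
    K, w = init_kernel(400, 160, init_window("hann", 400))
    return K.shape, w.shape  # th.Size([1024, 1, 512]), th.Size([512])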
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
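# Minimal sketch for the mel filterbank helper; 400-sample frames at the default
# 16 kHz with 80 mel bands are assumed example values.
def _mel_filter_usage_sketch():
    # frame_len=400 is rounded to a 512-point FFT, giving 512 // 2 + 1 = 257 bins,
    # so the filterbank is num_mels x num_bins.
    fbank = mel_filter(400, num_mels=80)
    return fbank.shape  # th.Size([80, 257])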
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
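# Minimal sketch for the speed-perturbation filter; resampling 16 kHz audio to
# 17.6 kHz (a 1.1x speed factor) is an assumed example.
def _speed_perturb_filter_usage_sketch():
    # gcd(16000, 17600) = 1600, so the rates reduce to 10 -> 11 and the returned
    # weight is dst_sr x src_sr x kernel_size.
    weight = speed_perturb_filter(16000, 17600)
    return weight.shape  # th.Size([11, 10, 15]) with the default num_zeros=64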
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
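# Minimal sketch for feature splicing; the batch, frame and feature sizes are
# arbitrary example values.
def _splice_feature_usage_sketch():
    # One frame of left and right context concatenated on the feature axis turns
    # 80-dim frames into 3 * 80 = 240-dim frames.
    feats = th.rand(4, 100, 80)
    spliced = splice_feature(feats, lctx=1, rctx=1, op="cat")
    return spliced.shape  # th.Size([4, 100, 240])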
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
pre_emphasis: factor of preemphasis
onesided: return half FFT bins
        center: if true, pad the signal so that frames are centered (as in librosa)
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
input (str), input format:
            polar: a (magnitude, phase) pair
            complex: a (real, imag) pair
            real: a single [real; imag] Tensor
frame_hop: frame hop size in number samples
onesided: return half FFT bins
        center: if true, trim the padding that _forward_stft added for centering
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
        center: center flag (similar to that in librosa.stft)
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        pre_emphasis: factor of preemphasis
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
        mode: "kaldi"|"librosa", slight difference on applying window function
"""
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
        center: center flag (similar to that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: "kaldi"|"librosa", slight difference on applying window function
"""
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
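# Minimal round-trip sketch for the functional STFT/iSTFT pair above; the signal
# length and frame parameters are arbitrary example values.
def _stft_roundtrip_usage_sketch():
    wav = th.rand(2, 16000)
    # 400-sample frames -> 512-point FFT -> 257 one-sided bins per frame
    real, imag = forward_stft(wav, 400, 160, output="complex")
    # reconstruct (up to edge effects, since center=False by default)
    rec = inverse_stft((real, imag), 400, 160, input="complex")
    return real.shape, rec.shape  # (2, 257, T) and (2, S')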
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
        center: center flag (similar to that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
pre_emphasis: factor of preemphasis
mode: "kaldi"|"librosa", slight difference on applying window function
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode="librosa") -> None:
super(STFTBase, self).__init__()
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.frame_len = frame_len
self.frame_hop = frame_hop
self.onesided = onesided
self.pre_emphasis = pre_emphasis
self.center = center
self.mode = mode
self.num_bins = self.K.shape[0] // 4 + 1
self.expr = (
f"window={window}, stride={frame_hop}, onesided={onesided}, " +
f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " +
f"center={self.center}, mode={self.mode}, " +
f"kernel_size={self.num_bins}x{self.K.shape[2]}")
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
Compute number of the frames
"""
if th.sum(wav_len <= self.frame_len):
raise RuntimeError(
f"Audio samples less than frame_len ({self.frame_len})")
kernel_size = self.K.shape[-1]
if self.center:
wav_len += kernel_size
return (wav_len - kernel_size) // self.frame_hop + 1
def extra_repr(self) -> str:
return self.expr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(
self,
wav: th.Tensor,
output: str = "polar"
) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
Accept (single or multiple channel) raw waveform and output magnitude and phase
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor or [Tensor, Tensor]), N x (C) x F x T
"""
return _forward_stft(wav,
self.K,
output=output,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center)
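# Minimal sketch for the layer-style API; the parameter values are arbitrary examples.
def _stft_layer_usage_sketch():
    stft = STFT(400, 160, window="hann")
    # "polar" output yields a (magnitude, phase) pair, each N x 257 x T here
    mag, pha = stft(th.rand(4, 16000), output="polar")
    return mag.shape, pha.shape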
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
input: str = "polar") -> th.Tensor:
"""
Accept phase & magnitude and output raw waveform
Args
transform (Tensor or [Tensor, Tensor]), STFT output
Return
s (Tensor), N x S
"""
return _inverse_stft(transform,
self.K,
self.w,
input=input,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center)
| 34.712522
| 121
| 0.538919
|
247d321ce0c16d5bb207856247ce1707ddf9bd17
| 15,527
|
py
|
Python
|
qa327_frontend_test/test_r1.py
|
RF0606/CISC327_PROJECT
|
b0e5839fdc1b6f754bbf05ce174feca9dac54a69
|
[
"MIT"
] | null | null | null |
qa327_frontend_test/test_r1.py
|
RF0606/CISC327_PROJECT
|
b0e5839fdc1b6f754bbf05ce174feca9dac54a69
|
[
"MIT"
] | null | null | null |
qa327_frontend_test/test_r1.py
|
RF0606/CISC327_PROJECT
|
b0e5839fdc1b6f754bbf05ce174feca9dac54a69
|
[
"MIT"
] | null | null | null |
from importlib import reload
import pytest
import os
import io
import sys
import qa327.app as app
path = os.path.dirname(os.path.abspath(__file__))
'''test case for R1.1: Test if user is logged in'''
def test_loggedIn(capsys):
if app.status:
terminal_input = ['logout', 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
'logout successfully',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.2: Test if user is not logged in'''
def test_notlogged(capsys):
if not app.status:
terminal_input = ["exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.1: enter buy can go to buy session when user is logged in'''
def test_goBuy_logged(capsys):
if app.status:
terminal_input = ["buy", 'logout', 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
'buying session started successfully',
'please type ticket name, quantity:',
'please retype',
'the number of inputs should be 2',
'type your choice:',
'register login exit',
'exit'
]
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.2: enter sell can go to sell session when user is logged in'''
def test_goSell_logged(capsys):
if app.status:
terminal_input = ["sell", 'logout', 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
'selling session started successfully',
'please type ticket name, price, quantity, date:',
'please retype',
'the number of inputs should be 4',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.3: enter update can go to update session when user is logged in'''
def test_goUpdate_logged(capsys):
if app.status:
terminal_input = ["update", 'logout', 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
'updating session started successfully',
'please type ticket name, price, quantity, date:',
'please retype',
'the number of inputs should be 4',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.4: enter logout can go to out session when user is logged in'''
def test_logout_successfully(capsys):
if app.status:
terminal_input = ["logout", 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
'please retype',
                                            'the number of inputs should be 2',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.5: enter login can go to login session when user is not logged in'''
def test_login_whenNotLoggedIn(capsys):
if not app.status:
terminal_input = ["login", "logout", "exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'login session started successfully',
'please type your email and password:',
'please retype',
'the number of inputs should be 2',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.6: enter register can go to register session when user is not logged in'''
def test_register_successfully(capsys):
if not app.status:
terminal_input = ["register", 'logout', 'exit', 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'register session started successfully',
'please enter your email, user name, password and '
'confirm your password:',
'please retype',
'the number of inputs should be 4 or exit',
'do you want to exit register session(type exit to leave):type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.3.7: enter exit can exit the program when user is not logged in'''
def test_exit_successfully(capsys):
if not app.status:
terminal_input = ["exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.4.1: when user is not logged in, buy command are not accepted'''
def test_goBuy_notLogged(capsys):
if not app.status:
terminal_input = ["buy", 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'invalid command',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.4.2: when user is not logged in, sell command are not accepted'''
def test_goSell_notLogged(capsys):
if not app.status:
terminal_input = ["sell", 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'invalid command',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.4.3: when user is not logged in, update command are not accepted'''
def test_goUpdate_notLogged(capsys):
if not app.status:
terminal_input = ["update", 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'invalid command',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.4.4: when user is not logged in, logout command are not accepted'''
def test_logout_fail(capsys):
if not app.status:
terminal_input = ["logout", 'exit']
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'type your choice:',
'register login exit',
'invalid command',
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.5.1: when user is logged in, login command are not accepted'''
def test_login_fail(capsys):
if app.status:
terminal_input = ["login", "logout", "exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
                                            'invalid command',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
"logout successfully",
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.5.2: when user is logged in, register command are not accepted'''
def test_register_fail(capsys):
if app.status:
terminal_input = ["register", "logout", "exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
                                            'invalid command',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
"logout successfully",
'type your choice:',
'register login exit',
'exit']
helper(capsys, terminal_input, expected_tail_of_terminal_output)
'''test case for R1.5.3: when user is logged in, exit command are not accepted'''
def test_exit_fail(capsys):
if app.status:
terminal_input = ["exit", "logout", "exit"]
expected_tail_of_terminal_output = ['Welcome the Queens ticket trade machine',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
                                            'invalid command',
'your balance: 1000',
'type your choice:',
'sell buy update logout',
"logout successfully",
'type your choice:',
'register login exit',
'exit']
        helper(capsys, terminal_input, expected_tail_of_terminal_output)
def helper(
capsys,
terminal_input,
expected_tail_of_terminal_output):
"""Helper function for testing
Arguments:
capsys -- object created by pytest to capture stdout and stderr
terminal_input -- list of string for terminal input
    expected_tail_of_terminal_output -- list of expected strings at the tail of the terminal output
"""
# cleanup package
reload(app)
# set terminal input
sys.stdin = io.StringIO(
'\n'.join(terminal_input))
# run the program
with pytest.raises(SystemExit):
app.main()
# capture terminal output / errors
# assuming that in this case we don't use stderr
out, err = capsys.readouterr()
# split terminal output in lines
out_lines = out.splitlines()
    # compare terminal outputs at the end
for i in range(1, len(expected_tail_of_terminal_output) + 1):
index = i * -1
assert expected_tail_of_terminal_output[index] == out_lines[index]
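# Note (added, descriptive only): the loop above walks the expected tail backwards,
# comparing expected[-1] with out_lines[-1], expected[-2] with out_lines[-2], and so on,
# so only the last len(expected_tail_of_terminal_output) lines of stdout are checked.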
| 44.236467
| 121
| 0.448702
|
9ee1225cb9bd79e8c8d6a6747084a5b1966dbff8
| 2,876
|
py
|
Python
|
process_plan_rates.py
|
sleibman/health-plan-stats
|
feec61d282cfa2102b8632d8ecc4c1696ed68bfd
|
[
"MIT"
] | null | null | null |
process_plan_rates.py
|
sleibman/health-plan-stats
|
feec61d282cfa2102b8632d8ecc4c1696ed68bfd
|
[
"MIT"
] | null | null | null |
process_plan_rates.py
|
sleibman/health-plan-stats
|
feec61d282cfa2102b8632d8ecc4c1696ed68bfd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Executable script for computing the Second Lowest Cost Silver Plan (SLCSP).
This code implements the assignment described at https://homework.adhoc.team/slcsp/
For a complete description of this implementation, see https://github.com/sleibman/health-plan-stats
Typical usage example:
./process_plan_rates.py
Log messages are written to stderr. Since the specified behavior mandates that the results be sent to stdout, it may be
helpful to redirect stderr to a file, via:
./process_plan_rates.py 2> slcsp.log
"""
import logging
import os
import pandas as pd
from healthplans import slcsp
def load_and_process_csv(slcsp_file, plans_file, zips_file):
"""Reads input csv files into pandas DataFrames and executes core SLCSP logic.
Example input files can be found at https://github.com/sleibman/health-plan-stats/sample_data
Args:
slcsp_file (str): Path and filename for slcsp.csv
plans_file (str): Path and filename for plans.csv
zips_file (str): Path and filename for zips.csv
Returns:
str: A string in csv format, suitable for writing to stdout or a csv file.
"""
# Notice that we force ZIP codes to be treated as non-numeric strings, because they are identifiers with things like
# leading zeros and no meaningful numeric operations. Same with county codes, which happen to be FIPS codes with
# properties similar to ZIPs.
# We allow the plan rates to be represented as floats, even though they are currency values. If the application
# were extended to manipulate these values in any way, this would merit some care in order to ensure the desired
# rounding behavior.
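    # Illustrative example (added, not from the original source) of why the string dtype matters;
    # assumes an in-memory csv via io.StringIO:
    #   pd.read_csv(io.StringIO("zipcode\n02116"))                           -> 2116 (int, leading zero lost)
    #   pd.read_csv(io.StringIO("zipcode\n02116"), dtype={'zipcode': 'str'}) -> "02116" (preserved)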
desired_zipcodes_df = pd.read_csv(slcsp_file, dtype={'zipcode': 'str'})
plans_df = pd.read_csv(plans_file, dtype={'plan_id': 'str'})
zips_df = pd.read_csv(zips_file, dtype={'zipcode': 'str', 'county_code': 'str'})
results_df = slcsp.process_rates(desired_zipcodes_df, plans_df, zips_df)
return results_df.to_csv(index=False, float_format='%.2f')
if __name__ == "__main__":
# TODO: In a more complete application, logging would be made easily configurable. In this case, the log level is
# hardcoded, and log output goes to stderr.
logging.basicConfig(level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S',
format='%(asctime)s - %(levelname)s - %(message)s')
# TODO: Provide the ability to override input file paths on the command line or via config file.
top_level_dir = os.path.dirname(os.path.realpath(__file__))
sample_data_dir = os.path.join(top_level_dir, 'sample_data')
slcsp_csv = os.path.join(sample_data_dir, 'slcsp.csv')
plans_csv = os.path.join(sample_data_dir, 'plans.csv')
zips_csv = os.path.join(sample_data_dir, 'zips.csv')
print(load_and_process_csv(slcsp_csv, plans_csv, zips_csv))
| 44.9375
| 120
| 0.729138
|
c9cc98073f04612bebb61116b6aecc6938fa97d6
| 7,246
|
py
|
Python
|
hgail/policies/latent_sampler.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 24
|
2018-03-16T22:29:16.000Z
|
2021-11-12T07:33:28.000Z
|
hgail/policies/latent_sampler.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 2
|
2018-06-29T06:37:46.000Z
|
2018-08-06T01:02:13.000Z
|
hgail/policies/latent_sampler.py
|
Kailiangdong/hgail
|
a668c4dda09d4e7f85b4640f42ff57b6764d24cc
|
[
"MIT"
] | 15
|
2018-07-30T16:46:07.000Z
|
2022-03-13T06:24:11.000Z
|
from rllab.core.serializable import Serializable
from rllab.misc.overrides import overrides
import copy
import numpy as np
import tensorflow as tf
class LatentSampler(object):
'''
Mixin class to be used when making a class intended to sample latent variables.
Since this is a mixin, we add the **kwargs and super call.
'''
def __init__(
self,
name,
dim,
latent_name='latent',
**kwargs):
super(LatentSampler, self).__init__(**kwargs)
self.name = name
self.dim = dim
self.latent_name = latent_name
self._build()
@property
def vectorized(self):
return True
@property
def state_info_specs(self):
'''
All the inheriting classes can use this because we handle the setting up
of the paths separately such that the optimizers think this is the only
additional state information needed.
'''
return [(self.latent_name, (self.dim,))]
def merge_sym(self, obs_var, state_info_vars=None):
'''
Symbolically merges the input variable with the latent variable of the sampler
Args:
- obs_var: symbolic variable to merge with, shape = (?, dim)
- state_info_vars: dictionary containing symbolic variables
relevant to this latent sampler
'''
with tf.variable_scope(self.name):
if state_info_vars is not None and self.latent_name in state_info_vars.keys():
latent = state_info_vars[self.latent_name]
else:
latent = self.latent
merged = tf.concat([obs_var, latent], axis=-1)
return merged
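    # Shape note (comment added for clarity): if obs_var has shape (?, obs_dim) and the
    # latent has shape (?, dim), the merged tensor has shape (?, obs_dim + dim).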
def merge(self, obs, state_infos):
'''
Numeric equivalent to merge_sym - combines obs and state_infos
Args:
- obs: observation
- state_infos: dict with (key, value) pairs
relevant to this latent sampler
'''
return np.hstack((obs, state_infos[self.latent_name]))
def _build(self):
with tf.variable_scope(self.name):
self.latent = tf.placeholder(tf.float32, shape=(None, self.dim), name=self.latent_name)
def __getstate__(self):
return dict(
name=self.name,
dim=self.dim,
latent_name=self.latent_name
)
def __setstate__(self, d):
self.name = d['name']
self.dim = d['dim']
self.latent_name = d['latent_name']
self._build()
def _categorical_latent_variable(dim, n_samples, pvals=None):
pvals = np.ones(dim) / dim if pvals is None else pvals
return np.random.multinomial(1, pvals, size=n_samples)
def _gaussian_latent_variable(dim, n_samples):
return np.random.multivariate_normal(
mean=np.zeros(dim),
cov=np.eye(dim),
size=n_samples
)
def _build_latent_variable_function(variable_type):
'''
Factory method used because variable_type is used in multiple locations,
and it is easier to pass around the string than it is to pass around one
    of these methods and check for type information each time
'''
if variable_type == 'categorical':
return _categorical_latent_variable
elif variable_type == 'gaussian':
return _gaussian_latent_variable
else:
raise ValueError('variable_type not implemented: {}'.format(variable_type))
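# Illustrative usage of the factory above (hypothetical call, not part of the original file):
#   sample_fn = _build_latent_variable_function('categorical')
#   sample_fn(dim=4, n_samples=3)   # -> (3, 4) array of one-hot rows drawn uniformly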
class UniformlyRandomLatentSampler(LatentSampler):
def __init__(
self,
scheduler,
variable_type='categorical',
**kwargs):
super(UniformlyRandomLatentSampler, self).__init__(**kwargs)
self.scheduler = scheduler
self.variable_type = variable_type
self.n_samples = None
self._latent_variable_function = _build_latent_variable_function(variable_type)
def _update_latent_variables(self, observations):
'''
Updates latent variables based on what the scheduler says.
Args:
- observations: numpy array of shape (?, obs_dim)
'''
indicators = self.scheduler.should_update(observations)
if any(indicators):
new_latent = self._latent_variable_function(
dim=self.dim, n_samples=self.n_samples)
for (i, indicator) in enumerate(indicators[:self.n_samples]):
if indicator:
self.latent_values[i] = new_latent[i]
def encode(self, observations):
'''
For the case where the observations are available before hand, for example in
the supervised case, this function allows for iterating the latent sampler to
get the latent values at each timestep. This is essentially performing inference
/ recognition / encoding, so it's named encode to be symmetric with encoders.
Args:
- observations: shape (n_samples, timesteps, input_dim) array
'''
n_samples, timesteps, _ = observations.shape
self.reset([True] * n_samples)
latents = np.zeros((n_samples, timesteps, self.dim))
for t in range(timesteps):
latents[:,t,:], _ = self.get_actions(observations[:,t])
return latents
def get_action(self, observation):
'''
Returns latent variable associated with current timestep and obs.
Args:
- observation: numpy array of shape (1, obs_dim)
'''
self._update_latent_variables(observation)
return copy.deepcopy(self.latent_values[0]), dict(latent=copy.deepcopy(self.latent_values[0]))
def get_actions(self, observations):
'''
Returns latent variables for current timestep and observations.
Args:
- observations: numpy array of shape (num_envs, obs_dim)
'''
self._update_latent_variables(observations)
assert len(observations) == len(self.latent_values)
return copy.deepcopy(self.latent_values), dict(latent=copy.deepcopy(self.latent_values))
def reset(self, dones=None):
'''
        resamples latent variables for the environments which have just
completed an episode (dones[i] == True -> resample var i)
Args:
- dones: list of bools indicating whether the corresponding
environment has recently reached a terminal state
'''
dones = [True] if dones is None else dones
if self.n_samples is None or len(dones) != self.n_samples:
self.n_samples = len(dones)
self.latent_values = self._latent_variable_function(
dim=self.dim, n_samples=self.n_samples)
self.scheduler.reset(dones)
def __getstate__(self):
d = super(UniformlyRandomLatentSampler, self).__getstate__()
d['scheduler'] = self.scheduler
d['variable_type'] = self.variable_type
return d
def __setstate__(self, d):
super(UniformlyRandomLatentSampler, self).__setstate__(d)
self.scheduler = d['scheduler']
self.variable_type = d['variable_type']
self.n_samples = None
self._latent_variable_function = _build_latent_variable_function(self.variable_type)
| 35.346341
| 102
| 0.635385
|
bace9a60e302759bc248a01f8e7de8d5b73bc002
| 619
|
py
|
Python
|
Python/Python For Absolute Beginner/13 If Else & Elif Conditions.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Python For Absolute Beginner/13 If Else & Elif Conditions.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
Python/Python For Absolute Beginner/13 If Else & Elif Conditions.py
|
omkarsutar1255/Python-Data
|
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
|
[
"CC-BY-3.0"
] | null | null | null |
var1=78
print("enter your value")
var2 = int(input())
if var1>var2:
print("lesser")
elif var1==var2:  # 'elif' is Python's "else if"
print("equal")
else:
print("greater")
numbers = {1, 2, 3, 4, 5, 6}  # note: curly braces create a set here, not a list
print(5 in numbers)  # 'in' checks whether a value is present in the collection
print(24 not in numbers)  # 'not in' checks whether a value is absent
if 14 not in numbers:
    print("yes, it's not in the collection")
print("Enter your age")
age = int(input())
if 7>age:
print("please Enter valid age")
elif 100<age:
print("please enter valid age")
elif 18<age:
print("you are allowed")
else:
print("you are not allowed")
| 20.633333
| 70
| 0.625202
|
e49f60272dfb6b862154be28f17183cfa0252d61
| 3,539
|
py
|
Python
|
src/gen_labels.py
|
FunmiKesa/JLA
|
4fcd6a0a382d451a54703e432e476c3a16166232
|
[
"MIT"
] | 5
|
2021-11-22T16:17:17.000Z
|
2022-02-17T13:06:14.000Z
|
src/gen_labels.py
|
FunmiKesa/JLA
|
4fcd6a0a382d451a54703e432e476c3a16166232
|
[
"MIT"
] | 1
|
2021-11-29T15:09:57.000Z
|
2021-11-30T09:30:49.000Z
|
src/gen_labels.py
|
FunmiKesa/JLA
|
4fcd6a0a382d451a54703e432e476c3a16166232
|
[
"MIT"
] | null | null | null |
import os.path as osp
import os
import numpy as np
import shutil
def mkdirs(d):
if not osp.exists(d):
os.makedirs(d)
def gen_labels_15(seq_root, label_root, seq_label="img1", gt_label="gt"):
seqs = [s for s in os.listdir(seq_root)]
seqs.sort()
tid_curr = 0
tid_last = -1
for seq in seqs:
print(seq)
with open(osp.join(seq_root, seq, 'seqinfo.ini'), 'r') as file:
seq_info = file.read()
seq_width = int(seq_info[seq_info.find('imWidth=') + 8:seq_info.find('\nimHeight')])
seq_height = int(seq_info[seq_info.find('imHeight=') + 9:seq_info.find('\nimExt')])
gt_txt = osp.join(seq_root, seq, gt_label, f'{gt_label}.txt')
gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')
idx = np.lexsort(gt.T[:2, :])
gt = gt[idx, :]
seq_label_root = osp.join(label_root, seq, seq_label)
mkdirs(seq_label_root)
for fid, tid, x, y, w, h, mark, _, _, _ in gt:
if mark == 0:
continue
fid = int(fid)
tid = int(tid)
if not tid == tid_last:
tid_curr += 1
tid_last = tid
x += w / 2
y += h / 2
label_fpath = osp.join(seq_label_root, '{:06d}.txt'.format(fid))
label_str = '0 {:d} {:.6f} {:.6f} {:.6f} {:.6f}\n'.format(
tid_curr, x / seq_width, y / seq_height, w / seq_width, h / seq_height)
with open(label_fpath, 'a') as f:
f.write(label_str)
def gen_labels(seq_root, label_root, seq_label="img1", gt_label="gt"):
seqs = [s for s in os.listdir(seq_root)]
seqs.sort()
tid_curr = 0
tid_last = -1
for seq in seqs:
print(seq)
seq_info = open(osp.join(seq_root, seq, 'seqinfo.ini')).read()
seq_width = int(seq_info[seq_info.find(
'imWidth=') + 8:seq_info.find('\nimHeight')])
seq_height = int(seq_info[seq_info.find(
'imHeight=') + 9:seq_info.find('\nimExt')])
gt_txt = osp.join(seq_root, seq, gt_label, f'{gt_label}.txt')
gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')
seq_label_root = osp.join(label_root, seq, seq_label)
mkdirs(seq_label_root)
for fid, tid, x, y, w, h, mark, label, _ in gt:
if mark == 0 or not label == 1:
continue
fid = int(fid)
tid = int(tid)
if not tid == tid_last:
tid_curr += 1
tid_last = tid
x += w / 2
y += h / 2
label_fpath = osp.join(seq_label_root, '{:06d}.txt'.format(fid))
label_str = '0 {:d} {:.6f} {:.6f} {:.6f} {:.6f}\n'.format(
tid_curr, x / seq_width, y / seq_height, w / seq_width, h / seq_height)
with open(label_fpath, 'a') as f:
f.write(label_str)
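# Format note (comment added): each label line written above is
#   0 <track_id> <cx/img_w> <cy/img_h> <w/img_w> <h/img_h>
# i.e. class id 0, a globally re-numbered track id, and a box centre/size normalised by the image size.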
if __name__ == "__main__":
datasets = ["MOT15", "MOT16", "MOT17", "MOT20"]
for d in datasets:
print("\n", d)
seq_root = f'data/{d}/images/train'
label_root = f'data/{d}/labels_with_ids/train'
        if not osp.exists(seq_root) or osp.exists(label_root):
print(f"{seq_root} not found or {label_root} exists!")
continue
if os.path.exists(label_root):
shutil.rmtree(label_root)
mkdirs(label_root)
if d == "MOT15":
gen_labels_15(seq_root, label_root)
else:
gen_labels(seq_root, label_root)
| 34.359223
| 96
| 0.534897
|
95f2235211d75c8de84acd61213ee5f9602a3294
| 2,886
|
py
|
Python
|
lib/gen_grid.py
|
borovik135/VisSatSatelliteStereo
|
e591e8753c48e231d2c5cce74d37df2252c4ed93
|
[
"BSD-3-Clause"
] | 37
|
2019-11-22T14:55:36.000Z
|
2022-03-27T07:52:18.000Z
|
lib/gen_grid.py
|
borovik135/VisSatSatelliteStereo
|
e591e8753c48e231d2c5cce74d37df2252c4ed93
|
[
"BSD-3-Clause"
] | 11
|
2020-02-10T16:23:25.000Z
|
2022-03-12T00:47:32.000Z
|
lib/gen_grid.py
|
borovik135/VisSatSatelliteStereo
|
e591e8753c48e231d2c5cce74d37df2252c4ed93
|
[
"BSD-3-Clause"
] | 14
|
2020-03-19T06:19:06.000Z
|
2022-02-16T07:59:38.000Z
|
# ===============================================================================================================
# Copyright (c) 2019, Cornell University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# * Neither the name of Cornell University nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# Author: Kai Zhang (kz298@cornell.edu)
#
# The research is based upon work supported by the Office of the Director of National Intelligence (ODNI),
# Intelligence Advanced Research Projects Activity (IARPA), via DOI/IBC Contract Number D17PC00287.
# The U.S. Government is authorized to reproduce and distribute copies of this work for Governmental purposes.
# ===============================================================================================================
import numpy as np
# generate a 3D grid
# x_points, y_points, z_points are numpy array
def gen_grid(x_points, y_points, z_points):
x_point_cnt = x_points.size
y_point_cnt = y_points.size
z_point_cnt = z_points.size
point_cnt = x_point_cnt * y_point_cnt * z_point_cnt
xx, yy = np.meshgrid(x_points, y_points, indexing='ij')
xx = np.reshape(xx, (-1, 1))
yy = np.reshape(yy, (-1, 1))
xx = np.tile(xx, (z_point_cnt, 1))
yy = np.tile(yy, (z_point_cnt, 1))
zz = np.zeros((point_cnt, 1))
for j in range(z_point_cnt):
idx1 = j * x_point_cnt * y_point_cnt
idx2 = (j + 1) * x_point_cnt * y_point_cnt
zz[idx1:idx2, 0] = z_points[j]
return xx, yy, zz
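# Illustrative example (added, not in the original file):
#   gen_grid(np.array([0., 1.]), np.array([0., 1.]), np.array([5.]))
# returns xx, yy, zz, each of shape (4, 1), enumerating the 2 x 2 x 1 grid with z fixed at 5.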
| 50.631579
| 115
| 0.677408
|
aa091563369b6938ac587697b1016ba8bbb89803
| 1,488
|
py
|
Python
|
bin/create_docker_commands_for_students_from_csv_file.py
|
mgalland/docker-for-teaching
|
2cf9505672f6bb64c8d7e5273b418f9239f3b121
|
[
"Apache-2.0"
] | null | null | null |
bin/create_docker_commands_for_students_from_csv_file.py
|
mgalland/docker-for-teaching
|
2cf9505672f6bb64c8d7e5273b418f9239f3b121
|
[
"Apache-2.0"
] | 2
|
2020-04-10T09:09:45.000Z
|
2020-04-10T09:10:50.000Z
|
bin/create_docker_commands_for_students_from_csv_file.py
|
ScienceParkStudyGroup/docker-master-gls
|
0928800620f4e2fb0c88e4f4d8c0785f64f28905
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Usage: python create_docker_commands_for_all_students.py [csv file with student to virtual machine correspondence] [docker image]
#
# This script prints a series of docker run commands that can be used to launch several RStudio instances (one per student) on a virtual machine.
#
# Example: python create_docker_commands_for_all_students.py list_of_students.csv scienceparkstudygroup/master-gls:rnaseq-2021
# Input file should have comma separated values (.csv)
# Input file format should contain these columns with this naming scheme.
# student machine password port
# Maura Cook machine-01 maura 8787
# Reini van Hal machine-02 reini 8788
# ...
# Output:
# It will output to the screen the individual commands to be run in the Linux VM in the cloud
import pandas as pd
import sys
student_to_machine = sys.argv[1]
docker_image = sys.argv[2]
df = pd.read_csv(student_to_machine, sep=",")
def create_docker_command(row):
"""Takes a Pandas row and return the docker command with corresponding student name + pwd + port number"""
student = row["student"]
machine_nb = row["machine"]
password = row["password"]
port = row["port"]
docker_cmd = "docker run --detach --name " + machine_nb + " -e PASSWORD=" + password + " -p " + str(port) + ":8787" + " " + docker_image
print(docker_cmd)
return docker_cmd
docker_commands = df.apply(create_docker_command, axis = 1, result_type='reduce')
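# Example output line (illustrative, for the "Maura Cook" sample row above with the image
# from the usage example in the header comment):
#   docker run --detach --name machine-01 -e PASSWORD=maura -p 8787:8787 scienceparkstudygroup/master-gls:rnaseq-2021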
| 37.2
| 144
| 0.722446
|
72a881ada46a48e795f8dea23d4adb738b6b0d74
| 1,020
|
py
|
Python
|
tests/mock/adapters.py
|
matlegit/kdr-watchman
|
6b154641e2d3324fbab43ef70162c407a73ffd1d
|
[
"MIT"
] | null | null | null |
tests/mock/adapters.py
|
matlegit/kdr-watchman
|
6b154641e2d3324fbab43ef70162c407a73ffd1d
|
[
"MIT"
] | null | null | null |
tests/mock/adapters.py
|
matlegit/kdr-watchman
|
6b154641e2d3324fbab43ef70162c407a73ffd1d
|
[
"MIT"
] | null | null | null |
from kdr import syncthing_factory as factory
import os
home_dir = os.path.expanduser('~')
test_dir = os.path.join(home_dir, 'kdr_test')
client_conf = {
'port' : 8389,
'sync_home' : os.path.join(test_dir, 'client'),
'sync_dir' : os.path.join(test_dir, 'client', 'sync') + '/',
}
server_conf = {
'port' : 8390,
'sync_home' : os.path.join(test_dir, 'server'),
'sync_dir' : os.path.join(test_dir, 'server', 'sync') + '/',
}
if not os.path.exists(client_conf['sync_home']):
os.makedirs(client_conf['sync_home'])
if not os.path.exists(client_conf['sync_dir']):
os.makedirs(client_conf['sync_dir'])
if not os.path.exists(server_conf['sync_home']):
os.makedirs(server_conf['sync_home'])
if not os.path.exists(server_conf['sync_dir']):
os.makedirs(server_conf['sync_dir'])
client = factory.get_handler(client_conf['sync_home'])
server = factory.get_handler(server_conf['sync_home'])
if not server.ping():
server.start(server_conf['port'])
if not client.ping():
client.start(client_conf['port'])
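# Note (comment added): importing this module has side effects -- it creates the kdr_test
# directories above and starts both syncthing handlers if they are not already responding.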
| 26.153846
| 62
| 0.70098
|
761a2205ef0bf7ede8faad0b46924951d8411851
| 45,680
|
py
|
Python
|
aliyun-tablestore-python-sdk-master/tablestore/encoder.py
|
SuiMingYang/sales-message-classify
|
1b9ce984e907b688096c2287ad80e495034b347c
|
[
"MIT"
] | 1
|
2020-09-01T10:37:51.000Z
|
2020-09-01T10:37:51.000Z
|
aliyun-tablestore-python-sdk-master/tablestore/encoder.py
|
SuiMingYang/sales-message-classify
|
1b9ce984e907b688096c2287ad80e495034b347c
|
[
"MIT"
] | null | null | null |
aliyun-tablestore-python-sdk-master/tablestore/encoder.py
|
SuiMingYang/sales-message-classify
|
1b9ce984e907b688096c2287ad80e495034b347c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf8 -*-#
import six
from builtins import int
from tablestore.error import *
from tablestore.metadata import *
from tablestore.plainbuffer.plain_buffer_builder import *
import tablestore.protobuf.table_store_pb2 as pb2
import tablestore.protobuf.table_store_filter_pb2 as filter_pb2
import tablestore.protobuf.search_pb2 as search_pb2
INT32_MAX = 2147483647
INT32_MIN = -2147483648
PRIMARY_KEY_TYPE_MAP = {
'INTEGER' : pb2.INTEGER,
'STRING' : pb2.STRING,
'BINARY' : pb2.BINARY,
}
PRIMARY_KEY_OPTION_MAP = {
PK_AUTO_INCR : pb2.AUTO_INCREMENT,
}
LOGICAL_OPERATOR_MAP = {
LogicalOperator.NOT : filter_pb2.LO_NOT,
LogicalOperator.AND : filter_pb2.LO_AND,
LogicalOperator.OR : filter_pb2.LO_OR,
}
COMPARATOR_TYPE_MAP = {
ComparatorType.EQUAL : filter_pb2.CT_EQUAL,
ComparatorType.NOT_EQUAL : filter_pb2.CT_NOT_EQUAL,
ComparatorType.GREATER_THAN : filter_pb2.CT_GREATER_THAN,
ComparatorType.GREATER_EQUAL : filter_pb2.CT_GREATER_EQUAL,
ComparatorType.LESS_THAN : filter_pb2.CT_LESS_THAN,
ComparatorType.LESS_EQUAL : filter_pb2.CT_LESS_EQUAL,
}
COLUMN_CONDITION_TYPE_MAP = {
ColumnConditionType.COMPOSITE_COLUMN_CONDITION : filter_pb2.FT_COMPOSITE_COLUMN_VALUE,
ColumnConditionType.SINGLE_COLUMN_CONDITION : filter_pb2.FT_SINGLE_COLUMN_VALUE,
}
DIRECTION_MAP = {
Direction.FORWARD : pb2.FORWARD,
Direction.BACKWARD : pb2.BACKWARD,
}
ROW_EXISTENCE_EXPECTATION_MAP = {
RowExistenceExpectation.IGNORE : pb2.IGNORE,
RowExistenceExpectation.EXPECT_EXIST : pb2.EXPECT_EXIST ,
RowExistenceExpectation.EXPECT_NOT_EXIST : pb2.EXPECT_NOT_EXIST ,
}
class OTSProtoBufferEncoder(object):
def __init__(self, encoding):
self.encoding = encoding
self.api_encode_map = {
'CreateTable' : self._encode_create_table,
'DeleteTable' : self._encode_delete_table,
'ListTable' : self._encode_list_table,
'UpdateTable' : self._encode_update_table,
'DescribeTable' : self._encode_describe_table,
'GetRow' : self._encode_get_row,
'PutRow' : self._encode_put_row,
'UpdateRow' : self._encode_update_row,
'DeleteRow' : self._encode_delete_row,
'BatchGetRow' : self._encode_batch_get_row,
'BatchWriteRow' : self._encode_batch_write_row,
'GetRange' : self._encode_get_range,
'ListSearchIndex' : self._encode_list_search_index,
'CreateSearchIndex' : self._encode_create_search_index,
'DescribeSearchIndex' : self._encode_describe_search_index,
'DeleteSearchIndex' : self._encode_delete_search_index,
'Search' : self._encode_search,
'CreateIndex' : self._encode_create_index,
'DropIndex' : self._encode_delete_index,
}
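        # Dispatch table (comment added for clarity): encode_request() looks up the API
        # name here and forwards its remaining arguments to the matching _encode_* builder.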
def _get_enum(self, e):
        # to be compatible with enum and enum34
return e.value if hasattr(e, 'value') else e
def _get_unicode(self, value):
if isinstance(value, six.binary_type):
return value.decode(self.encoding)
elif isinstance(value, six.text_type):
return value
else:
raise OTSClientError(
"expect str or unicode type for string, not %s: %s" % (
value.__class__.__name__, str(value))
)
def _get_int32(self, int32):
if isinstance(int32, int):
if int32 < INT32_MIN or int32 > INT32_MAX:
raise OTSClientError("%s exceeds the range of int32" % int32)
return int32
else:
raise OTSClientError(
"expect int or long for the value, not %s"
% int32.__class__.__name__
)
def _make_repeated_column_names(self, proto, columns_to_get):
if columns_to_get is None:
# if no column name is given, get all primary_key_columns and attribute_columns.
return
if not isinstance(columns_to_get, list) and not isinstance(columns_to_get, tuple):
raise OTSClientError(
"expect list or tuple for columns_to_get, not %s"
% columns_to_get.__class__.__name__
)
for column_name in columns_to_get:
proto.append(self._get_unicode(column_name))
def _make_column_value(self, proto, value):
        # NOTE: the bool check must come before the int check,
        # because in Python a bool is also an int.
        if isinstance(value, six.text_type) or isinstance(value, six.binary_type):
string = self._get_unicode(value)
proto.type = pb2.STRING
proto.v_string = string
elif isinstance(value, bool):
proto.type = pb2.BOOLEAN
proto.v_bool = value
elif isinstance(value, int):
proto.type = pb2.INTEGER
proto.v_int = value
elif isinstance(value, float):
proto.type = pb2.DOUBLE
proto.v_double = value
elif isinstance(value, bytearray):
proto.type = pb2.BINARY
proto.v_binary = bytes(value)
elif value is INF_MIN:
proto.type = pb2.INF_MIN
elif value is INF_MAX:
proto.type = pb2.INF_MAX
else:
raise OTSClientError(
"expect str, unicode, int, long, bool or float for colum value, not %s"
% value.__class__.__name__
)
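    # Summary of the mapping above (comment added): str/unicode -> STRING, bool -> BOOLEAN,
    # int -> INTEGER, float -> DOUBLE, bytearray -> BINARY, INF_MIN / INF_MAX -> range sentinels.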
def _get_column_option(self, option):
global PRIMARY_KEY_OPTION_MAP
enum_map = PRIMARY_KEY_OPTION_MAP
proto_option = enum_map.get(option)
if proto_option != None:
return proto_option
else:
raise OTSClientError(
"primary_key_option should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(option)
)
)
def _get_column_type(self, type_str):
global PRIMARY_KEY_TYPE_MAP
enum_map = PRIMARY_KEY_TYPE_MAP
proto_type = enum_map.get(type_str)
if proto_type != None:
return proto_type
else:
raise OTSClientError(
"primary_key_type should be one of [%s], not %s" % (
", ".join(sorted(list(enum_map.keys()))), str(type_str)
)
)
def _make_composite_condition(self, condition):
proto = filter_pb2.CompositeColumnValueFilter()
# combinator
global LOGICAL_OPERATOR_MAP
enum_map = LOGICAL_OPERATOR_MAP
proto.combinator = enum_map.get(condition.combinator)
if proto.combinator is None:
raise OTSClientError(
"LogicalOperator should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(condition.combinator)
)
)
for sub in condition.sub_conditions:
self._make_column_condition(proto.sub_filters.add(), sub)
return proto.SerializeToString()
def _make_relation_condition(self, condition):
proto = filter_pb2.SingleColumnValueFilter()
# comparator
global COMPARATOR_TYPE_MAP
enum_map = COMPARATOR_TYPE_MAP
proto.comparator = enum_map.get(condition.comparator)
if proto.comparator is None:
raise OTSClientError(
"ComparatorType should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(condition.comparator)
)
)
proto.column_name = self._get_unicode(condition.column_name)
proto.column_value = bytes(PlainBufferBuilder.serialize_column_value(condition.column_value))
proto.filter_if_missing = not condition.pass_if_missing
proto.latest_version_only = condition.latest_version_only
return proto.SerializeToString()
def _make_column_condition(self, proto, column_condition):
if column_condition == None:
return
if not isinstance(column_condition, ColumnCondition):
raise OTSClientError(
"column condition should be an instance of ColumnCondition, not %s" %
column_condition.__class__.__name__
)
# type
global COLUMN_CONDITION_TYPE_MAP
enum_map = COLUMN_CONDITION_TYPE_MAP
proto.type = enum_map.get(column_condition.get_type())
if proto.type is None:
raise OTSClientError(
"column_condition_type should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(column_condition.type)
)
)
# condition
if isinstance(column_condition, CompositeColumnCondition):
proto.filter = self._make_composite_condition(column_condition)
elif isinstance(column_condition, SingleColumnCondition):
proto.filter = self._make_relation_condition(column_condition)
else:
raise OTSClientError(
"expect CompositeColumnCondition, SingleColumnCondition but not %s"
% column_condition.__class__.__name__
)
def _make_condition(self, proto, condition):
if not isinstance(condition, Condition):
raise OTSClientError(
"condition should be an instance of Condition, not %s" %
condition.__class__.__name__
)
global ROW_EXISTENCE_EXPECTATION_MAP
enum_map = ROW_EXISTENCE_EXPECTATION_MAP
expectation_str = self._get_unicode(condition.row_existence_expectation)
proto.row_existence = enum_map.get(expectation_str)
if proto.row_existence is None:
raise OTSClientError(
"row_existence_expectation should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(expectation_str)
)
)
if condition.column_condition is not None:
pb_filter = filter_pb2.Filter()
self._make_column_condition(pb_filter, condition.column_condition)
proto.column_condition = pb_filter.SerializeToString()
def _get_direction(self, direction_str):
global DIRECTION_MAP
enum_map = DIRECTION_MAP
proto_direction = enum_map.get(direction_str)
if proto_direction != None:
return proto_direction
else:
raise OTSClientError(
"direction should be one of [%s], not %s" % (
", ".join(list(enum_map.keys())), str(direction_str)
)
)
def _make_column_schema(self, proto, schema_tuple):
proto.name = self._get_unicode(schema_tuple[0])
proto.type = self._get_column_type(schema_tuple[1])
if len(schema_tuple) == 3:
proto.option = self._get_column_option(schema_tuple[2])
def _make_schemas_with_list(self, proto, schema_list):
for schema_tuple in schema_list:
if not isinstance(schema_tuple, tuple):
raise OTSClientError(
"all schemas of primary keys should be tuple, not %s" % (
schema_tuple.__class__.__name__
)
)
schema_proto = proto.add()
self._make_column_schema(schema_proto, schema_tuple)
def _make_columns_with_dict(self, proto, column_dict):
for name, value in column_dict.items():
item = proto.add()
item.name = self._get_unicode(name)
self._make_column_value(item.value, value)
def _make_update_of_attribute_columns_with_dict(self, proto, column_dict):
if not isinstance(column_dict, dict):
raise OTSClientError(
"expect dict for 'update_of_attribute_columns', not %s" % (
column_dict.__class__.__name__
)
)
for key, value in column_dict.items():
if key == 'put':
if not isinstance(column_dict[key], dict):
raise OTSClientError(
"expect dict for put operation in 'update_of_attribute_columns', not %s" % (
column_dict[key].__class__.__name__
)
)
for name, value in column_dict[key].items():
item = proto.add()
item.type = pb2.PUT
item.name = self._get_unicode(name)
self._make_column_value(item.value, value)
elif key == 'delete':
if not isinstance(column_dict[key], list):
raise OTSClientError(
"expect list for delete operation in 'update_of_attribute_columns', not %s" % (
column_dict[key].__class__.__name__
)
)
for name in column_dict[key]:
item = proto.add()
item.type = pb2.DELETE
item.name = self._get_unicode(name)
else:
raise OTSClientError(
"operation type in 'update_of_attribute_columns' should be 'put' or 'delete', not %s" % (
key
)
)
def _make_index_field_schema(self, proto, field_schema):
proto.field_name = self._get_unicode(field_schema.field_name)
proto.field_type = self._get_enum(field_schema.field_type)
if field_schema.index is not None:
proto.index = field_schema.index
if field_schema.store is not None:
proto.store = field_schema.store
if field_schema.is_array is not None:
proto.is_array = field_schema.is_array
if field_schema.enable_sort_and_agg is not None:
proto.enable_sort_and_agg = field_schema.enable_sort_and_agg
if field_schema.analyzer:
proto.analyzer = field_schema.analyzer
for sub_field_schema in field_schema.sub_field_schemas:
sub_field_proto = proto.field_schemas.add()
self._make_index_field_schema(sub_field_proto, sub_field_schema)
def _make_index_setting(self, proto, index_setting):
proto.number_of_shards = 1
proto.routing_fields.extend(index_setting.routing_fields)
def _make_index_sorter(self, proto, sorter):
if not isinstance(sorter, Sorter):
raise OTSClientError(
"sorter should be an instance of Sorter, not %s"
% sorter.__class__.__name__
)
if isinstance(sorter, PrimaryKeySort):
proto.pk_sort.order = self._get_enum(sorter.sort_order)
elif isinstance(sorter, FieldSort):
proto.field_sort.field_name = sorter.field_name
if sorter.sort_order is not None:
proto.field_sort.order = self._get_enum(sorter.sort_order)
if sorter.sort_mode is not None:
proto.field_sort.mode = self._get_enum(sorter.sort_mode)
if sorter.nested_filter is not None:
self._make_nested_filter(proto.field_sort.nested_filter, sorter.nested_filter)
elif isinstance(sorter, GeoDistanceSort):
proto.geo_distance_sort.field_name = sorter.field_name
proto.geo_distance_sort.points.extend(sorter.points)
if sorter.sort_order is not None:
proto.geo_distance_sort.order = self._get_enum(sorter.sort_order)
if sorter.sort_mode is not None:
proto.geo_distance_sort.mode = self._get_enum(sorter.sort_mode)
if sorter.geo_distance_type is not None:
proto.geo_distance_sort.distance_type = self._get_enum(sorter.geo_distance_type)
if sorter.nested_filter is not None:
self._make_nested_filter(proto.geo_distance_sort.nested_filter, sorter.nested_filter)
elif isinstance(sorter, ScoreSort):
proto.score_sort.order = self._get_enum(sorter.sort_order)
else:
raise OTSClientError(
"Only PrimaryKeySort and FieldSort are allowed, not %s."
% sorter.__class__.__name__
)
def _make_index_sort(self, proto, index_sort):
if not isinstance(index_sort, Sort):
raise OTSClientError(
"index_sort should be an instance of Sort, not %s"
% index_sort.__class__.__name__
)
for sorter in index_sort.sorters:
self._make_index_sorter(proto.sorter.add(), sorter)
def _make_index_meta(self, proto, index_meta):
if not isinstance(index_meta, SearchIndexMeta):
raise OTSClientError(
"index_meta should be an instance of SearchIndexMeta, not %s"
% index_meta.__class__.__name__
)
for field in index_meta.fields:
field_proto = proto.field_schemas.add()
self._make_index_field_schema(field_proto, field)
index_setting = index_meta.index_setting if index_meta.index_setting else IndexSetting()
self._make_index_setting(proto.index_setting, index_setting)
if index_meta.index_sort:
self._make_index_sort(proto.index_sort, index_meta.index_sort)
def _get_defined_column_type(self, column_type):
if column_type == 'STRING':
return pb2.DCT_STRING
elif column_type == 'INTEGER':
return pb2.DCT_INTEGER
elif column_type == 'DOUBLE':
return pb2.DCT_DOUBLE
elif column_type == 'BOOLEAN':
return pb2.DCT_BOOLEAN
elif column_type == 'BINARY':
return pb2.DCT_BLOB
else:
raise OTSClientError(
"Wrong type for defined column, only support [STRING, INTEGER, DOUBLE, BOOLEAN, BINARY]."
)
def _make_defined_column_schema(self, proto, defined_columns):
if defined_columns:
for defined_column in defined_columns:
if not isinstance(defined_column, tuple):
raise OTSClientError(
"all schemas of primary keys should be tuple, not %s" % (
defined_column.__class__.__name__
)
)
column_proto = proto.add()
column_proto.name = defined_column[0]
column_proto.type = self._get_defined_column_type(defined_column[1])
def _make_table_meta(self, proto, table_meta):
if not isinstance(table_meta, TableMeta):
raise OTSClientError(
"table_meta should be an instance of TableMeta, not %s"
% table_meta.__class__.__name__
)
proto.table_name = self._get_unicode(table_meta.table_name)
self._make_schemas_with_list(
proto.primary_key,
table_meta.schema_of_primary_key,
)
self._make_defined_column_schema(
proto.defined_column,
table_meta.defined_columns
)
def _make_table_options(self, proto, table_options):
if not isinstance(table_options, TableOptions):
raise OTSClientError(
"table_option should be an instance of TableOptions, not %s"
% table_options.__class__.__name__
)
if table_options.time_to_live is not None:
if not isinstance(table_options.time_to_live, int):
raise OTSClientError(
"time_to_live should be an instance of int, not %s"
% table_options.time_to_live.__class__.__name__
)
proto.time_to_live = table_options.time_to_live
if table_options.max_version is not None:
if not isinstance(table_options.max_version, int):
raise OTSClientError(
"max_version should be an instance of int, not %s"
% table_options.max_version.__class__.__name__
)
proto.max_versions = table_options.max_version
if table_options.max_time_deviation is not None:
if not isinstance(table_options.max_time_deviation, int):
raise OTSClientError(
"max_time_deviation should be an instance of TableOptions, not %s"
% table_options.max_time_deviation.__class__.__name__
)
proto.deviation_cell_version_in_sec = table_options.max_time_deviation
def _make_capacity_unit(self, proto, capacity_unit):
if not isinstance(capacity_unit, CapacityUnit):
raise OTSClientError(
"capacity_unit should be an instance of CapacityUnit, not %s"
% capacity_unit.__class__.__name__
)
if capacity_unit.read is None or capacity_unit.write is None:
raise OTSClientError("both of read and write of CapacityUnit are required")
proto.read = self._get_int32(capacity_unit.read)
proto.write = self._get_int32(capacity_unit.write)
def _make_reserved_throughput(self, proto, reserved_throughput):
if not isinstance(reserved_throughput, ReservedThroughput):
raise OTSClientError(
"reserved_throughput should be an instance of ReservedThroughput, not %s"
% reserved_throughput.__class__.__name__
)
self._make_capacity_unit(proto.capacity_unit, reserved_throughput.capacity_unit)
def _make_update_capacity_unit(self, proto, capacity_unit):
if not isinstance(capacity_unit, CapacityUnit):
raise OTSClientError(
"capacity_unit should be an instance of CapacityUnit, not %s"
% capacity_unit.__class__.__name__
)
if capacity_unit.read is None and capacity_unit.write is None:
raise OTSClientError("at least one of read or write of CapacityUnit is required")
if capacity_unit.read is not None:
proto.read = self._get_int32(capacity_unit.read)
if capacity_unit.write is not None:
proto.write = self._get_int32(capacity_unit.write)
def _make_update_reserved_throughput(self, proto, reserved_throughput):
if not isinstance(reserved_throughput, ReservedThroughput):
raise OTSClientError(
"reserved_throughput should be an instance of ReservedThroughput, not %s"
% reserved_throughput.__class__.__name__
)
self._make_update_capacity_unit(proto.capacity_unit, reserved_throughput.capacity_unit)
def _make_batch_get_row_internal(self, proto, request):
for table_name, item in list(request.items.items()):
table_item = proto.tables.add()
table_item.table_name = self._get_unicode(item.table_name)
self._make_repeated_column_names(table_item.columns_to_get, item.columns_to_get)
if item.column_filter is not None:
pb_filter = filter_pb2.Filter()
self._make_column_condition(pb_filter, item.column_filter)
table_item.filter = pb_filter.SerializeToString()
for pk in item.primary_keys:
table_item.primary_key.append(bytes(PlainBufferBuilder.serialize_primary_key(pk)))
if item.token is not None:
for token in item.token:
table_item.token.append(token)
if item.max_version is not None:
table_item.max_versions = item.max_version
if item.time_range is not None:
if isinstance(item.time_range, tuple):
table_item.time_range.start_time = item.time_range[0]
table_item.time_range.end_time = item.time_range[1]
elif isinstance(item.time_range, int) or isinstance(item.time_range, int):
table_item.time_range.specific_time = item.time_range
if item.start_column is not None:
table_item.start_column = item.start_column
if item.end_column is not None:
table_item.end_column = item.end_column
def _make_batch_get_row(self, proto, request):
if isinstance(request, BatchGetRowRequest):
self._make_batch_get_row_internal(proto, request)
else:
raise OTSClientError("The request should be a instance of BatchGetRowRequest, not %d"%(len(request.__class__.__name__)))
def _make_put_row_item(self, proto, put_row_item):
condition = put_row_item.condition
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if put_row_item.return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.row_change = bytes(PlainBufferBuilder.serialize_for_put_row(
put_row_item.row.primary_key, put_row_item.row.attribute_columns))
proto.type = pb2.PUT
return proto
def _make_update_row_item(self, proto, update_row_item):
condition = update_row_item.condition
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if update_row_item.return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.row_change = bytes(PlainBufferBuilder.serialize_for_update_row(
update_row_item.row.primary_key, update_row_item.row.attribute_columns))
proto.type = pb2.UPDATE
return proto
def _make_delete_row_item(self, proto, delete_row_item):
condition = delete_row_item.condition
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if delete_row_item.return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.row_change = bytes(PlainBufferBuilder.serialize_for_delete_row(delete_row_item.row.primary_key))
proto.type = pb2.DELETE
return proto
def _make_batch_write_row_internal(self, proto, request):
for table_name, item in list(request.items.items()):
table_item = proto.tables.add()
table_item.table_name = self._get_unicode(item.table_name)
for row_item in item.row_items:
if row_item.type == BatchWriteRowType.PUT:
row = table_item.rows.add()
self._make_put_row_item(row, row_item)
if row_item.type == BatchWriteRowType.UPDATE:
row = table_item.rows.add()
self._make_update_row_item(row, row_item)
if row_item.type == BatchWriteRowType.DELETE:
row = table_item.rows.add()
self._make_delete_row_item(row, row_item)
def _make_batch_write_row(self, proto, request):
if isinstance(request, BatchWriteRowRequest):
self._make_batch_write_row_internal(proto, request)
else:
raise OTSClientError("The request should be a instance of MultiTableInBatchWriteRowItem, not %d"%(len(request.__class__.__name__)))
def _make_secondary_index(self, proto, secondary_index):
proto.name = secondary_index.index_name
proto.primary_key.extend(secondary_index.primary_key_names)
proto.defined_column.extend(secondary_index.defined_column_names)
if secondary_index.index_type == SecondaryIndexType.GLOBAL_INDEX:
proto.index_type = pb2.IT_GLOBAL_INDEX
proto.index_update_mode = pb2.IUM_ASYNC_INDEX
elif secondary_index.index_type == SecondaryIndexType.LOCAL_INDEX:
proto.index_type = pb2.IT_LOCAL_INDEX
proto.index_update_mode = pb2.IUM_SYNC_INDEX
def _encode_create_table(self, table_meta, table_options, reserved_throughput, secondary_indexes):
proto = pb2.CreateTableRequest()
self._make_table_meta(proto.table_meta, table_meta)
self._make_reserved_throughput(proto.reserved_throughput, reserved_throughput)
self._make_table_options(proto.table_options, table_options)
for secondary_index in secondary_indexes:
self._make_secondary_index(proto.index_metas.add(), secondary_index)
return proto
def _encode_delete_table(self, table_name):
proto = pb2.DeleteTableRequest()
proto.table_name = self._get_unicode(table_name)
return proto
def _encode_list_table(self):
proto = pb2.ListTableRequest()
return proto
def _encode_update_table(self, table_name, table_options, reserved_throughput):
proto = pb2.UpdateTableRequest()
proto.table_name = self._get_unicode(table_name)
if reserved_throughput is not None:
self._make_update_reserved_throughput(proto.reserved_throughput, reserved_throughput)
if table_options is not None:
self._make_table_options(proto.table_options, table_options)
return proto
def _encode_describe_table(self, table_name):
proto = pb2.DescribeTableRequest()
proto.table_name = self._get_unicode(table_name)
return proto
def _encode_get_row(self, table_name, primary_key, columns_to_get, column_filter,
max_version, time_range, start_column, end_column, token):
proto = pb2.GetRowRequest()
proto.table_name = self._get_unicode(table_name)
self._make_repeated_column_names(proto.columns_to_get, columns_to_get)
if column_filter is not None:
pb_filter = filter_pb2.Filter()
self._make_column_condition(pb_filter, column_filter)
proto.filter = pb_filter.SerializeToString()
proto.primary_key = bytes(PlainBufferBuilder.serialize_primary_key(primary_key))
if max_version is not None:
proto.max_versions = max_version
if time_range is not None:
if isinstance(time_range, tuple):
proto.time_range.start_time = time_range[0]
proto.time_range.end_time = time_range[1]
elif isinstance(time_range, int) or isinstance(time_range, int):
proto.time_range.specific_time = time_range
if start_column is not None:
proto.start_column = start_column
if end_column is not None:
proto.end_column = end_column
if token is not None:
proto.token = token
return proto
def _encode_put_row(self, table_name, row, condition, return_type):
proto = pb2.PutRowRequest()
proto.table_name = self._get_unicode(table_name)
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.row = bytes(PlainBufferBuilder.serialize_for_put_row(row.primary_key, row.attribute_columns))
return proto
def _encode_update_row(self, table_name, row, condition, return_type):
proto = pb2.UpdateRowRequest()
proto.table_name = self._get_unicode(table_name)
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.row_change = bytes(PlainBufferBuilder.serialize_for_update_row(row.primary_key, row.attribute_columns))
return proto
def _encode_delete_row(self, table_name, row, condition, return_type):
proto = pb2.DeleteRowRequest()
proto.table_name = self._get_unicode(table_name)
if condition is None:
condition = Condition(RowExistenceExpectation.IGNORE, None)
self._make_condition(proto.condition, condition)
if return_type == ReturnType.RT_PK:
proto.return_content.return_type = pb2.RT_PK
proto.primary_key = bytes(PlainBufferBuilder.serialize_for_delete_row(row.primary_key))
return proto
def _encode_batch_get_row(self, request):
proto = pb2.BatchGetRowRequest()
self._make_batch_get_row(proto, request)
return proto
def _encode_batch_write_row(self, request):
proto = pb2.BatchWriteRowRequest()
self._make_batch_write_row(proto, request)
return proto
def _encode_get_range(self, table_name, direction,
inclusive_start_primary_key, exclusive_end_primary_key,
columns_to_get, limit, column_filter,
max_version, time_range, start_column,
end_column, token):
proto = pb2.GetRangeRequest()
proto.table_name = self._get_unicode(table_name)
proto.direction = self._get_direction(direction)
self._make_repeated_column_names(proto.columns_to_get, columns_to_get)
proto.inclusive_start_primary_key = bytes(PlainBufferBuilder.serialize_primary_key(inclusive_start_primary_key))
proto.exclusive_end_primary_key = bytes(PlainBufferBuilder.serialize_primary_key(exclusive_end_primary_key))
if column_filter is not None:
pb_filter = filter_pb2.Filter()
self._make_column_condition(pb_filter, column_filter)
proto.filter = pb_filter.SerializeToString()
if limit is not None:
proto.limit = self._get_int32(limit)
if max_version is not None:
proto.max_versions = max_version
if time_range is not None:
if isinstance(time_range, tuple):
proto.time_range.start_time = time_range[0]
proto.time_range.end_time = time_range[1]
elif isinstance(time_range, int):
proto.time_range.specific_time = time_range
if start_column is not None:
proto.start_column = start_column
if end_column is not None:
            proto.end_column = end_column
if token is not None:
proto.token = token
return proto
def encode_request(self, api_name, *args, **kwargs):
if api_name not in self.api_encode_map:
raise OTSClientError("No PB encode method for API %s" % api_name)
handler = self.api_encode_map[api_name]
return handler(*args, **kwargs)
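    # Illustrative call (hypothetical, comment added for clarity):
    #   encoder.encode_request('DeleteTable', 'my_table')   # -> populated pb2.DeleteTableRequest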
def _encode_list_search_index(self, table_name):
proto = search_pb2.ListSearchIndexRequest()
if table_name:
proto.table_name = self._get_unicode(table_name)
return proto
def _encode_delete_search_index(self, table_name, index_name):
proto = search_pb2.DeleteSearchIndexRequest()
proto.table_name = table_name
proto.index_name = index_name
return proto
def _encode_describe_search_index(self, table_name, index_name):
proto = search_pb2.DescribeSearchIndexRequest()
proto.table_name = self._get_unicode(table_name)
proto.index_name = self._get_unicode(index_name)
return proto
def _encode_create_search_index(self, table_name, index_name, index_meta):
proto = search_pb2.CreateSearchIndexRequest()
proto.table_name = self._get_unicode(table_name)
proto.index_name = self._get_unicode(index_name)
self._make_index_meta(proto.schema, index_meta)
return proto
def _make_nested_filter(self, proto, nested_filter):
proto.path = nested_filter.path
self._make_query(proto.filter, nested_filter.query_filter)
def _encode_search(self, table_name, index_name, search_query, columns_to_get, routing_keys):
proto = search_pb2.SearchRequest()
proto.table_name = table_name
proto.index_name = index_name
if columns_to_get is not None:
proto.columns_to_get.return_type = self._get_enum(columns_to_get.return_type)
self._make_repeated_column_names(proto.columns_to_get.column_names, columns_to_get.column_names)
proto.search_query = self._encode_search_query(search_query)
if routing_keys is not None:
for routing_key in routing_keys:
proto.routing_values.append(bytes(PlainBufferBuilder.serialize_primary_key(routing_key)))
return proto
def _encode_match_query(self, query):
proto = search_pb2.MatchQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.text = self._get_unicode(query.text)
if query.minimum_should_match is not None:
proto.minimum_should_match = query.minimum_should_match
if query.operator is not None:
proto.operator = search_pb2.OR if (query.operator == QueryOperator.OR) else search_pb2.AND
return proto.SerializeToString()
def _encode_match_phase_query(self, query):
proto = search_pb2.MatchPhraseQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.text = self._get_unicode(query.text)
return proto.SerializeToString()
def _encode_term_query(self, query):
proto = search_pb2.TermQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.term = bytes(PlainBufferBuilder.serialize_column_value(query.column_value))
return proto.SerializeToString()
def _encode_range_query(self, query):
proto = search_pb2.RangeQuery()
proto.field_name = self._get_unicode(query.field_name)
if query.range_from is not None:
proto.range_from = bytes(PlainBufferBuilder.serialize_column_value(query.range_from))
if query.range_to is not None:
proto.range_to = bytes(PlainBufferBuilder.serialize_column_value(query.range_to))
if query.include_lower is not None:
proto.include_lower = query.include_lower
if query.include_upper is not None:
proto.include_upper = query.include_upper
return proto.SerializeToString()
def _encode_prefix_query(self, query):
proto = search_pb2.PrefixQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.prefix = self._get_unicode(query.prefix)
return proto.SerializeToString()
def _encode_bool_query(self, query):
proto = search_pb2.BoolQuery()
for q in query.must_queries:
q_proto = proto.must_queries.add()
self._make_query(q_proto, q)
for q in query.must_not_queries:
q_proto = proto.must_not_queries.add()
self._make_query(q_proto, q)
for q in query.filter_queries:
q_proto = proto.filter_queries.add()
self._make_query(q_proto, q)
for q in query.should_queries:
q_proto = proto.should_queries.add()
self._make_query(q_proto, q)
if query.minimum_should_match is not None:
proto.minimum_should_match = query.minimum_should_match
return proto.SerializeToString()
def _encode_nested_query(self, query):
proto = search_pb2.NestedQuery()
proto.path = query.path
self._make_query(proto.query, query.query)
if query.score_mode is not None:
proto.score_mode = self._get_enum(query.score_mode)
return proto.SerializeToString()
def _encode_wildcard_query(self, query):
proto = search_pb2.WildcardQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.value = self._get_unicode(query.value)
return proto.SerializeToString()
def _encode_match_all_query(self, query):
proto = search_pb2.MatchAllQuery()
return proto.SerializeToString()
def _encode_geo_bounding_box_query(self, query):
proto = search_pb2.GeoBoundingBoxQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.top_left = self._get_unicode(query.top_left)
proto.bottom_right = self._get_unicode(query.bottom_right)
return proto.SerializeToString()
def _encode_geo_distance_query(self, query):
proto = search_pb2.GeoDistanceQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.center_point = self._get_unicode(query.center_point)
proto.distance = float(query.distance)
return proto.SerializeToString()
def _encode_geo_polygon_query(self, query):
proto = search_pb2.GeoPolygonQuery()
proto.field_name = self._get_unicode(query.field_name)
proto.points.extend(query.points)
return proto.SerializeToString()
def _encode_terms_query(self, query):
proto = search_pb2.TermsQuery()
proto.field_name = query.field_name
for column_value in query.column_values:
proto.terms.append(bytes(PlainBufferBuilder.serialize_column_value(column_value)))
return proto.SerializeToString()
def _make_function_value_factor(self, proto, value_factor):
proto.field_name = self._get_unicode(value_factor.field_name)
def _encode_function_score_query(self, query):
proto = search_pb2.FunctionScoreQuery()
self._make_query(proto.query, query.query)
self._make_function_value_factor(proto.field_value_factor, query.field_value_factor)
return proto.SerializeToString()
def _make_query(self, proto, query):
if isinstance(query, MatchQuery):
proto.type = search_pb2.MATCH_QUERY
proto.query = self._encode_match_query(query)
elif isinstance(query, MatchPhraseQuery):
proto.type = search_pb2.MATCH_PHRASE_QUERY
proto.query = self._encode_match_phase_query(query)
elif isinstance(query, TermQuery):
proto.type = search_pb2.TERM_QUERY
proto.query = self._encode_term_query(query)
elif isinstance(query, RangeQuery):
proto.type = search_pb2.RANGE_QUERY
proto.query = self._encode_range_query(query)
elif isinstance(query, PrefixQuery):
proto.type = search_pb2.PREFIX_QUERY
proto.query = self._encode_prefix_query(query)
elif isinstance(query, BoolQuery):
proto.type = search_pb2.BOOL_QUERY
proto.query = self._encode_bool_query(query)
elif isinstance(query, NestedQuery):
proto.type = search_pb2.NESTED_QUERY
proto.query = self._encode_nested_query(query)
elif isinstance(query, WildcardQuery):
proto.type = search_pb2.WILDCARD_QUERY
proto.query = self._encode_wildcard_query(query)
elif isinstance(query, MatchAllQuery):
proto.type = search_pb2.MATCH_ALL_QUERY
proto.query = self._encode_match_all_query(query)
elif isinstance(query, GeoBoundingBoxQuery):
proto.type = search_pb2.GEO_BOUNDING_BOX_QUERY
proto.query = self._encode_geo_bounding_box_query(query)
elif isinstance(query, GeoDistanceQuery):
proto.type = search_pb2.GEO_DISTANCE_QUERY
proto.query = self._encode_geo_distance_query(query)
elif isinstance(query, GeoPolygonQuery):
proto.type = search_pb2.GEO_POLYGON_QUERY
proto.query = self._encode_geo_polygon_query(query)
elif isinstance(query, TermsQuery):
proto.type = search_pb2.TERMS_QUERY
proto.query = self._encode_terms_query(query)
elif isinstance(query, FunctionScoreQuery):
proto.type = search_pb2.FUNCTION_SCORE_QUERY
proto.query = self._encode_function_score_query(query)
else:
raise OTSClientError(
"Invalid query type: %s"
% query.__class__.__name__
)
def _make_collapse(self, proto, collapse):
proto.field_name = collapse.field_name
def _encode_search_query(self, search_query):
proto = search_pb2.SearchQuery()
self._make_query(proto.query, search_query.query)
if search_query.sort is not None:
self._make_index_sort(proto.sort, search_query.sort)
if search_query.get_total_count is not None:
proto.get_total_count = search_query.get_total_count
if search_query.next_token is not None:
proto.token = search_query.next_token
if search_query.offset is not None:
proto.offset = search_query.offset
if search_query.limit is not None:
proto.limit = search_query.limit
#if search_query.collapse is not None:
# self._make_collapse(proto.collapse, search_query.collapse)
return proto.SerializeToString()
def _encode_create_index(self, table_name, index_meta):
proto = pb2.CreateIndexRequest()
proto.main_table_name = table_name
self._make_secondary_index(proto.index_meta, index_meta)
return proto
def _encode_delete_index(self, table_name, index_name):
proto = pb2.DropIndexRequest()
proto.main_table_name = table_name
proto.index_name = index_name
return proto
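# --- Illustrative sketch (an assumption, not part of the original SDK module) ---
# Shows how the isinstance() dispatch in _make_query() above is exercised via
# _encode_search_query(): the query object is mapped to a (type, serialized
# bytes) union inside the SearchQuery protobuf. The `encoder` argument and the
# use of SimpleNamespace as a stand-in for the SDK's own SearchQuery object are
# assumptions made only for this sketch.
from types import SimpleNamespace

def _sketch_encode_match_all(encoder):
    # `encoder` is assumed to be an instance of the encoder class defined above.
    search_query = SimpleNamespace(
        query=MatchAllQuery(),  # carries no fields; see _encode_match_all_query()
        sort=None,
        get_total_count=True,
        next_token=None,
        offset=0,
        limit=10,
    )
    # Returns the serialized SearchQuery protobuf bytes.
    return encoder._encode_search_query(search_query)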
| 40.895255
| 143
| 0.64757
|
fbd0e18723d6a03f6228afd14f1033ad9fa6c5d4
| 3,786
|
py
|
Python
|
airflow/providers/microsoft/azure/hooks/container_volume.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 8,092
|
2016-04-27T20:32:29.000Z
|
2019-01-05T07:39:33.000Z
|
airflow/providers/microsoft/azure/hooks/container_volume.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 2,961
|
2016-05-05T07:16:16.000Z
|
2019-01-05T08:47:59.000Z
|
airflow/providers/microsoft/azure/hooks/container_volume.py
|
takuti/airflow
|
0ac3b8c3dd749c59e60cf0169580b9e7c5049d9e
|
[
"Apache-2.0"
] | 3,546
|
2016-05-04T20:33:16.000Z
|
2019-01-05T05:14:26.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict
from azure.mgmt.containerinstance.models import AzureFileVolume, Volume
from airflow.hooks.base import BaseHook
class AzureContainerVolumeHook(BaseHook):
"""
A hook which wraps an Azure Volume.
:param azure_container_volume_conn_id: Reference to the
:ref:`Azure Container Volume connection id <howto/connection:azure_container_volume>`
of an Azure account of which container volumes should be used.
"""
conn_name_attr = "azure_container_volume_conn_id"
default_conn_name = 'azure_container_volume_default'
conn_type = 'azure_container_volume'
hook_name = 'Azure Container Volume'
def __init__(self, azure_container_volume_conn_id: str = 'azure_container_volume_default') -> None:
super().__init__()
self.conn_id = azure_container_volume_conn_id
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField
return {
"extra__azure_container_volume__connection_string": PasswordField(
lazy_gettext('Blob Storage Connection String (optional)'), widget=BS3PasswordFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> Dict[str, Any]:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['schema', 'port', 'host', "extra"],
"relabeling": {
'login': 'Azure Client ID',
'password': 'Azure Secret',
},
"placeholders": {
'login': 'client_id (token credentials auth)',
'password': 'secret (token credentials auth)',
'extra__azure_container_volume__connection_string': 'connection string auth',
},
}
def get_storagekey(self) -> str:
"""Get Azure File Volume storage key"""
conn = self.get_connection(self.conn_id)
service_options = conn.extra_dejson
if 'extra__azure_container_volume__connection_string' in service_options:
for keyvalue in service_options['extra__azure_container_volume__connection_string'].split(";"):
key, value = keyvalue.split("=", 1)
if key == "AccountKey":
return value
return conn.password
def get_file_volume(
self, mount_name: str, share_name: str, storage_account_name: str, read_only: bool = False
) -> Volume:
"""Get Azure File Volume"""
return Volume(
name=mount_name,
azure_file=AzureFileVolume(
share_name=share_name,
storage_account_name=storage_account_name,
read_only=read_only,
storage_account_key=self.get_storagekey(),
),
)
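# --- Illustrative usage sketch (an assumption, not part of the original hook) ---
# Demonstrates how get_file_volume() above is typically called: the hook
# resolves the storage account key from the Airflow connection and returns an
# Azure `Volume` ready to be mounted into a container group. The share,
# account and mount names below are placeholders, not real resources.
def _example_build_volume() -> Volume:
    hook = AzureContainerVolumeHook(
        azure_container_volume_conn_id='azure_container_volume_default'
    )
    return hook.get_file_volume(
        mount_name='data',                 # path the container will see
        share_name='my-file-share',        # Azure Files share (placeholder)
        storage_account_name='mystorage',  # storage account (placeholder)
        read_only=True,
    )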
| 39.030928
| 107
| 0.667987
|
d65e825fd5142a23a8d69d3495ac24c4b15da201
| 3,559
|
py
|
Python
|
explorer/scripts/healthcare_gov_extract.py
|
bayesimpact/tds-frontend
|
a4f47e384ef4fe4dc43c30423a1713c2c93dc87f
|
[
"Apache-2.0"
] | 15
|
2018-05-08T23:54:38.000Z
|
2020-03-07T20:46:37.000Z
|
explorer/scripts/healthcare_gov_extract.py
|
akegan/encompass
|
85852a91c646c62e8cd05f9c2b0c7cf0079ea7f2
|
[
"Apache-2.0"
] | 297
|
2018-02-05T19:04:26.000Z
|
2022-02-12T07:52:37.000Z
|
explorer/scripts/healthcare_gov_extract.py
|
bayesimpact/tds
|
a4f47e384ef4fe4dc43c30423a1713c2c93dc87f
|
[
"Apache-2.0"
] | 6
|
2018-05-21T19:51:15.000Z
|
2019-03-21T19:20:27.000Z
|
#!/usr/local/bin/python3.6
"""
Extract provider json files from Healthcare.gov provider endpoints.
Writes results to data folder in this repo
bayeshack github repo: https://github.com/bayesimpact/bayeshack-hhs-marketplace
Spreadsheet of links to all machine readable PUFs:
http://download.cms.gov/marketplace-puf/2016/machine-readable-url-puf.zip
"""
import argparse
import errno
import logging
import os
import requests
from lib import etl_helper
logging.basicConfig(level=logging.INFO)
def _main(**kwargs):
"""Manually kickoff the ETL process for a given state."""
state = kwargs['state']
logging.info('Starting up ETL process for {}'.format(state))
plans = etl_helper.extract_plans(state)
logging.info('There are {} plans listed in {}'.format(len(plans), state))
logging.info('{}'.format([plan[0] for plan in plans]))
# Exclude dentists if the user wants.
dental_plan_urls = [plan[1] for plan in plans if 'dent' in plan[1].lower()]
if etl_helper.query_yes_no(message="Would you like to exclude these dental plans? {}".format(
dental_plan_urls)):
plans = [plan for plan in plans if not plan[1] in dental_plan_urls]
for issuer_id, plan_url in plans:
logging.info('Processing plan {} at url {}'.format(issuer_id, plan_url))
try:
provider_urls = etl_helper.fetch_provider_urls(plan_url)
except Exception:
logging.error(
                'Error fetching provider urls for {} at {}. Moving on...'.format(issuer_id, plan_url))
continue
logging.info('There are {} provider urls for this plan'.format(len(provider_urls)))
for url in provider_urls:
target_path = etl_helper.HEALTHCARE_GOV_PATH + '/{}/{}/{}.json'.format(
state, etl_helper.clean_plan_name(issuer_id), etl_helper.clean_paths(url)
)
            # Prevent re-downloading a file that already exists locally.
if os.path.exists(target_path):
logging.warning('Filepath {} already exists. Skipping...'.format(target_path))
continue
# Create directory for plan if it doesn't exist.
if not os.path.exists(os.path.dirname(target_path)):
try:
os.makedirs(os.path.dirname(target_path))
except OSError as exc: # Guard against race condition.
if exc.errno != errno.EEXIST:
raise
try:
response = requests.get(url, stream=True)
handle = open(target_path, "wb")
for chunk in response.iter_content(chunk_size=512):
if chunk: # Filter out keep-alive new chunks.
handle.write(chunk)
response.raise_for_status()
except Exception:
                # Delete the partially downloaded file so only complete files are kept.
os.remove(target_path)
logging.exception('Error fetching url {}. Deleting ...'.format(target_path))
logging.info('Plan {} completed'.format(issuer_id))
def _get_arguments():
"""Build argument parser."""
parser = argparse.ArgumentParser(description='This starts a measure calculation.')
parser.add_argument(
'-s', '--state',
help="""
State to extract data from.
""",
required=True,
type=str)
args = parser.parse_args()
return args.__dict__
if __name__ == '__main__':
_main(**_get_arguments())
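# --- Illustrative invocation (an assumption, not part of the original script) ---
# The parser above only requires --state, so a single-state run looks like,
# for example:
#
#     python healthcare_gov_extract.py --state CA
#
# Output JSON files land under
# etl_helper.HEALTHCARE_GOV_PATH/<state>/<cleaned issuer id>/<cleaned url>.json,
# and files that already exist are skipped on re-runs.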
| 35.237624
| 97
| 0.623771
|
c47ebe4b73ef4bde427916a26febc32e18bc55aa
| 21,333
|
py
|
Python
|
shingetsu/gateway.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 78
|
2015-01-09T10:49:10.000Z
|
2022-02-16T03:06:28.000Z
|
shingetsu/gateway.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 5
|
2015-01-11T16:24:33.000Z
|
2019-02-18T15:02:32.000Z
|
shingetsu/gateway.py
|
acemomiage/saku
|
66ab704106d368f7c916f9ba71b28fe9bef62c48
|
[
"BSD-2-Clause"
] | 24
|
2015-01-07T08:29:47.000Z
|
2022-03-23T07:22:20.000Z
|
"""Saku Gateway base module.
"""
#
# Copyright (c) 2005-2021 shinGETsu Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import html
import cgi
import os
import re
import urllib.request, urllib.error, urllib.parse
import sys
import time
from io import BytesIO
from . import basecgi
from . import config
from . import spam
from .cache import *
from .jscache import JsCache
from .node import *
from .title import *
from .tag import *
from .template import Template
from .updatequeue import UpdateQueue
from .util import opentext
dummyquery = str(int(time.time()))
class Message(dict):
"""Multi-language message for gateway."""
def __init__(self, file):
dict.__init__(self)
try:
f = opentext(file)
del_eos = re.compile(r"[\r\n]*")
iscomment = re.compile(r"^#$").search
for line in f:
line = del_eos.sub("", line)
if iscomment(line):
pass
else:
buf = line.split("<>")
if len(buf) == 2:
buf[1] = urllib.parse.unquote(buf[1])
self[buf[0]] = buf[1]
f.close()
except IOError:
sys.stderr.write(file + ": IOError\n")
# End of Message
def search_message(accept_language):
"""Search message file.
Example of accept_language: "ja,en-us;q=0.7,en;q=0.3"
"""
q = {}
lang = []
if accept_language != "":
for i in accept_language.split(","):
found = re.search(r"(\S+)\s*;\s*q=(\S+)", i)
if found:
try:
q[found.group(1)] = float(found.group(2))
except ValueError:
pass
else:
q[i] = 1
lang = list(q.keys())
lang.sort(key=lambda x: q[x], reverse=True)
lang.append(config.language)
for i in lang:
short_lang = i.split('-')[0]
for j in (i, short_lang):
file = config.file_dir + "/" + "message-" + j + ".txt"
if re.search(r'^[-A-Za-z0-9]+$', j) and os.path.isfile(file):
return Message(file)
return None
# End of search_message
class CGI(basecgi.CGI):
root = config.root_path
sep = config.query_separator
appli = config.application
gateway_cgi = config.gateway
thread_cgi = config.thread_cgi
admin_cgi = config.admin_cgi
message = None
filter = None
str_filter = ''
tag = None
str_tag = ''
def __init__(self,
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
environ=os.environ):
basecgi.CGI.__init__(self,
stdin=stdin,
stdout=stdout,
stderr=stderr,
environ=environ)
if "HTTP_ACCEPT_LANGUAGE" in self.environ:
al = self.environ["HTTP_ACCEPT_LANGUAGE"]
else:
al = ""
self.message = search_message(al)
addr = self.environ.get("REMOTE_ADDR", "")
self.remoteaddr = addr
self.isadmin = config.re_admin.search(addr)
self.isfriend = config.re_friend.search(addr)
self.isvisitor = config.re_visitor.search(addr)
self.obj_template = Template()
self.template = self.obj_template.display
self.jscache = JsCache(config.abs_docroot)
var = {
'cgi': self,
'environ': self.environ,
'ua': self.environ.get('HTTP_USER_AGENT', ''),
'message': self.message,
'lang': self.message['lang'],
'config': config,
'appli': self.appli,
'gateway_cgi': self.gateway_cgi,
'thread_cgi': self.thread_cgi,
'admin_cgi': self.admin_cgi,
'root_path': config.root_path,
'types': config.types,
'isadmin': self.isadmin,
'isfriend': self.isfriend,
'isvisitor': self.isvisitor,
'localtime': self.localtime,
'str_encode': self.str_encode,
'file_decode': self.file_decode,
'escape': self.escape,
'escape_simple': lambda s: html.escape(s, True),
'escape_space': self.escape_space,
'escape_js': self.escape_js,
'make_list_item': self.make_list_item,
'gateway_link': self.gateway_link,
'dummyquery': dummyquery,
}
self.obj_template.set_defaults(var)
def path_info(self):
"""Parse PATH_INFO.
If PATH_INFO is not defined, use QUERY_STRING.
x.cgi?foo&bar=y -> path="foo".
"""
m = re.search(r"^([^&;=]*)(&|$)", self.environ.get("QUERY_STRING", ""))
if self.environ.get("PATH_INFO", "") != "":
path = self.environ["PATH_INFO"]
if path.startswith("/"):
path = path[1:]
elif m is not None:
path = m.group(1)
else:
path = ""
path = self.escape(self.str_decode(path))
return path
def str_encode(self, query):
return str_encode(query)
def str_decode(self, query):
return str_decode(query)
def file_encode(self, type, query):
return file_encode(type, query)
def file_decode(self, query):
return file_decode(query)
def escape(self, msg):
if msg is None:
return ''
msg = msg.replace("&", "&")
msg = re.sub(r"&(#\d+|#[Xx][0-9A-Fa-f]+|[A-Za-z0-9]+);",
r"&\1;",
msg)
msg = msg.replace("<", "<")
msg = msg.replace(">", ">")
msg = msg.replace("\r", "")
msg = msg.replace("\n", "<br>")
return msg
def gateway_link(self, cginame, command):
var = {
'cginame': cginame,
'command': command,
'description': self.message.get('desc_'+command, ''),
}
return self.template('gateway_link', var)
def extension(self, suffix, use_merged=True):
filename = []
for i in os.listdir(config.abs_docroot):
if i.endswith('.%s' % suffix) and \
(not (i.startswith('.') or i.startswith('_'))):
filename.append(i)
elif use_merged and i == '__merged.%s' % suffix:
return [i]
filename.sort()
return filename
def menubar(self, id='', rss=''):
var = {
'id': id,
'rss': rss,
}
return self.template('menubar', var)
def header(self, title='', rss='',
cookie=None, deny_robot=False):
'''Print CGI and HTTP header.
'''
if rss == '':
rss = self.gateway_cgi + '/rss'
form = cgi.FieldStorage(environ=self.environ, fp=BytesIO())
if form.getfirst('__debug_js'):
js = self.extension('js', False)
else:
self.jscache.update()
js = []
var = {
'title': title,
'str_title': self.str_encode(title),
'rss': rss,
'cookie': cookie,
'deny_robot': deny_robot,
'mergedjs': self.jscache,
'js': js,
'css': self.extension('css'),
'menubar': self.menubar('top', rss)
}
self.stdout.write(self.template('header', var))
def footer(self, menubar=None):
self.stdout.write(self.template('footer', {'menubar': menubar}))
def localtime(self, stamp=0):
"""Return YYYY-mm-dd HH:MM."""
return time.strftime('%Y-%m-%d %H:%M', time.localtime(int(stamp)))
def rfc822_time(self, stamp=0):
"""Return date and time in RFC822 format."""
return time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime(int(stamp)))
def res_anchor(self, id, appli, title, absuri=False):
title = self.str_encode(title)
if absuri:
prefix = config.gateway_protocol + '://' + self.host
innerlink = ''
else:
prefix = ''
innerlink = ' class="innerlink"'
return '<a href="%s%s%s%s/%s"%s>' % \
(prefix, appli, self.sep, title, id, innerlink)
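    # html_format() turns plain record text into HTML: it restores newlines
    # from <br>, escapes the text, auto-links http(s) URLs, converts
    # ">>xxxxxxxx" (8 hex digits) into record anchors via res_anchor(), and
    # expands [[...]] references through bracket_link().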
def html_format(self, plain, appli, title, absuri=False):
buf = plain.replace("<br>", "\n")
buf = buf.expandtabs()
buf = self.escape(buf)
buf = re.sub(r"https?://[^\x00-\x20\"'()<>\[\]\x7F-\xFF]{2,}",
r'<a href="\g<0>">\g<0></a>',
buf)
buf = re.sub(r"(>>)([0-9a-f]{8})",
self.res_anchor(r"\2", appli, title, absuri=absuri) +
r"\g<0></a>",
buf)
buf = re.sub(r'\[\[<a.*?>(.*?)\]\]</a>', r'[[\1]]', buf)
tmp = ""
while buf:
m = re.search(r"\[\[([^<>]+?)\]\]", buf)
if m is not None:
tmp += buf[:m.start()]
tmp += self.bracket_link(m.group(1), appli, absuri=absuri)
buf = buf[m.end():]
else:
tmp += buf
buf = ""
return self.escape_space(tmp)
def bracket_link(self, link, appli, absuri=False):
"""Encode bracket string to link.
See WikiWikiWeb.
"""
if absuri:
prefix = config.gateway_protocol + '://' + self.host
else:
prefix = ''
m = re.search(r"^/(thread)/([^/]+)/([0-9a-f]{8})$", link)
if m is not None:
uri = prefix + self.thread_cgi + self.sep + \
self.str_encode(m.group(2)) + \
'/' + m.group(3)
return '<a href="' + uri + '" class="reclink">[[' + link + ']]</a>'
m = re.search(r"^/(thread)/([^/]+)$", link)
if m is not None:
uri = prefix + self.appli[m.group(1)] + self.sep + \
self.str_encode(m.group(2))
return '<a href="' + uri + '">[[' + link + ']]</a>'
m = re.search(r"^([^/]+)/([0-9a-f]{8})$", link)
if m is not None:
uri = prefix + appli + self.sep + \
self.str_encode(m.group(1)) + \
'/' + m.group(2)
return '<a href="' + uri + '" class="reclink">[[' + link + ']]</a>'
m = re.search(r"^([^/]+)$", link)
if m is not None:
uri = prefix + appli + self.sep + \
self.str_encode(m.group(1))
return '<a href="' + uri + '">[[' + link + ']]</a>'
return "[[" + link + "]]"
def remove_file_form(self, cache, title=''):
var = {
'cache': cache,
'title': title,
}
self.stdout.write(self.template('remove_file_form', var))
def mch_url(self):
path = '/2ch/subject.txt'
if not config.enable2ch:
return ''
if config.server_name:
return '//' + config.server_name + path
host = re.sub(r':\d+', '', self.environ.get('HTTP_HOST', ''))
if not host:
return ''
return '//%s:%d%s' % (host, config.dat_port, path)
def mch_categories(self):
if not config.enable2ch:
return []
mch_url = self.mch_url()
categories = []
# my tags
with opentext(config.run_dir + '/tag.txt') as f:
tags = [t.strip() for t in f]
for tag in tags:
cat_url = mch_url.replace('2ch', file_encode('2ch', tag))
categories.append({'url': cat_url, 'text': tag})
return categories
def print_jump(self, next):
'''Print jump script.'''
var = {
'next': next,
}
self.stdout.write(self.template('jump', var))
def print302(self, next):
"""Print CGI header (302 moved temporarily)."""
self.header("Loading...")
self.print_jump(next)
self.footer()
def print403(self):
'''Print CGI header (403 forbidden).'''
self.header(self.message['403'], deny_robot=True)
self.print_paragraph(self.message['403_body'])
self.footer()
def print404(self, cache=None, id=None):
'''Print CGI header (404 not found).'''
self.header(self.message['404'], deny_robot=True)
self.print_paragraph(self.message['404_body'])
if cache is not None:
self.remove_file_form(cache)
self.footer()
def lock(self):
if self.isadmin:
lockfile = config.admin_search
else:
lockfile = config.search_lock
if not os.path.isfile(lockfile):
f = open(lockfile, 'wb')
f.close()
return True
elif os.path.getmtime(lockfile) + config.search_timeout < time.time():
f = open(lockfile, 'wb')
f.close()
return True
else:
return False
def unlock(self):
if self.isadmin:
lockfile = config.admin_search
else:
lockfile = config.search_lock
try:
os.remove(lockfile)
except (OSError, IOError) as err:
self.stderr.write('%s: OSError/IOError: %s\n' % (lockfile, err))
return False
def get_cache(self, cache):
'''Search cache from network.'''
result = cache.search()
self.unlock()
return result
def print_new_element_form(self, parent=None):
if not (self.isadmin or self.isfriend):
return
var = {
'datfile': '',
'cginame': self.gateway_cgi,
}
self.stdout.write(self.template('new_element_form', var))
def error_time(self):
from random import gauss
return int(gauss(time.time(), config.time_error))
def do_post(self, path, form):
"""Post article."""
import base64
try:
attach = form['attach']
except KeyError:
attach = None
str_attach = ''
if (attach is not None) and attach.file:
if len(attach.value) > config.record_limit*1024:
self.header(self.message["big_file"], deny_robot=True)
self.footer()
return None
if isinstance(attach.value, str):
attach_value = attach.value.encode('utf-8', 'replace')
else:
attach_value = attach.value
b64attach = base64.encodebytes(attach_value)
str_attach = str(b64attach, 'utf-8', 'replace').replace("\n", "")
guess_suffix = "txt"
if (attach is not None) and attach.filename:
found = re.search(r"\.([^.]+)$", attach.filename)
if found:
guess_suffix = found.group(1).lower()
suffix = form.getfirst("suffix", "")
if (suffix == "") or (suffix == "AUTO"):
suffix = guess_suffix
elif suffix.startswith("."):
suffix = suffix[1:].lower()
else:
suffix = suffix.lower()
suffix = re.sub(r"[^0-9A-Za-z]", "", suffix)
if form.getfirst("error", "") != "":
stamp = self.error_time()
else:
stamp = int(time.time())
body = {}
value = form.getfirst("body", "")
if value != "":
body["body"] = self.escape(value)
if str_attach != "":
body["attach"] = str_attach
body["suffix"] = re.sub(r"[\r\n]", "", suffix)
if not body:
self.header(self.message["null_article"], deny_robot=True)
self.footer()
return None
for key in ("base_stamp", "base_id", "name", "mail"):
value = form.getfirst(key, "")
if value != "":
body[key] = self.escape(value)
if not body:
self.header(self.message["null_article"], deny_robot=True)
self.footer()
return None
cache = Cache(form.getfirst("file"))
rec = Record(datfile=cache.datfile)
passwd = form.getfirst("passwd", "")
id = rec.build(stamp, body, passwd=passwd)
proxy_client = self.environ.get('HTTP_X_FORWARDED_FOR', 'direct')
self.stderr.write('post %s/%d_%s from %s/%s\n' %
(cache.datfile, stamp, id,
self.remoteaddr, proxy_client))
if len(rec.recstr) > config.record_limit*1024:
self.header(self.message['big_file'], deny_robot=True)
self.footer()
return None
elif spam.check(rec.recstr) or form.getfirst('homepage', '') != '':
self.header(self.message['spam'], deny_robot=True)
self.footer()
return None
if cache.exists():
cache.add_data(rec)
cache.sync_status()
else:
self.print404()
return None
if form.getfirst("dopost", "") != "":
queue = UpdateQueue()
queue.append(cache.datfile, stamp, id, None)
queue.start()
return id[:8]
def check_get_cache(self):
agent = self.environ.get("HTTP_USER_AGENT", "")
if not (self.isfriend or self.isadmin):
return False
elif re.search(config.robot, agent):
return False
elif self.lock():
return True
else:
return False
def check_visitor(self):
return self.isadmin or self.isfriend or self.isvisitor
def escape_space(self, text):
text = re.sub(r' ', ' ', text)
text = re.sub(r'<br> ', '<br> ', text)
text = re.sub(r'^ ', ' ', text)
text = re.sub(r' $', ' ', text)
        text = text.replace('<br>', '<br />\n')
return text
def escape_js(self, text):
return text.replace('"', r'\"').replace(']]>', '');
def make_list_item(self, cache,
remove=True, target='changes', search=False):
x = self.file_decode(cache.datfile)
if not x:
return ''
y = self.str_encode(x)
if self.filter and self.filter not in x.lower():
return ''
elif self.tag:
matchtag = False
if target == 'recent':
cache_tags = cache.tags + cache.sugtags
else:
cache_tags = cache.tags
for t in cache_tags:
if str(t).lower() == self.tag:
matchtag = True
break
if not matchtag:
return ''
x = self.escape_space(x)
if search:
str_opts = '?search_new_file=yes'
else:
str_opts = ''
        sugtags = []
        if target == 'recent':
            str_tags = [str(t).lower() for t in cache.tags]
            for st in cache.sugtags:
                if str(st).lower() not in str_tags:
                    sugtags.append(st)
var = {
'cache': cache,
'title': x,
'str_title': y,
'tags': cache.tags,
'sugtags': sugtags,
'target': target,
'remove': remove,
'str_opts': str_opts,
}
return self.template('list_item', var)
def print_index_list(self, cachelist,
target='', footer=True, search_new_file=False):
var = {
'target': target,
'filter': self.str_filter,
'tag': self.str_tag,
'taglist': UserTagList(),
'cachelist': cachelist,
'search_new_file': search_new_file,
}
self.stdout.write(self.template('index_list', var))
if footer:
            self.print_new_element_form()
self.footer()
def print_paragraph(self, contents):
var = {'contents': contents}
self.stdout.write(self.template('paragraph', var))
# End of CGI
| 32.719325
| 79
| 0.516524
|
4410440ed496956dc0bd8485eb299823c688c96f
| 1,016
|
py
|
Python
|
app/services/roles.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 1
|
2022-03-21T07:37:02.000Z
|
2022-03-21T07:37:02.000Z
|
app/services/roles.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 25
|
2022-01-16T13:18:21.000Z
|
2022-03-29T13:08:19.000Z
|
app/services/roles.py
|
NewShadesDAO/api
|
1e66336f0ea526f245918ecdc328c9a66280be91
|
[
"CC0-1.0"
] | 1
|
2022-01-15T21:42:00.000Z
|
2022-01-15T21:42:00.000Z
|
from bson import ObjectId
from app.helpers.cache_utils import cache
from app.helpers.permissions import Permission, needs
from app.models.user import Role, User
from app.schemas.users import RoleCreateSchema
from app.services.crud import create_item, get_items
@needs(permissions=[Permission.ROLES_LIST])
async def get_roles(server_id: str, current_user: User):
return await get_items(filters={"server": ObjectId(server_id)}, result_obj=Role)
@needs(permissions=[Permission.ROLES_CREATE])
async def create_role(server_id: str, role_model: RoleCreateSchema, current_user: User, internal=False):
role_model.server = server_id
role_name = role_model.name
if not internal and role_name.strip().startswith("@"):
raise Exception("Roles starting with '@' are protected.")
role = await create_item(role_model, result_obj=Role, current_user=current_user, user_field=None)
await cache.client.hset(f"server:{server_id}", f"roles.{str(role.pk)}", ",".join(role.permissions))
return role
| 42.333333
| 104
| 0.770669
|
c9948ef29439233d873edccc2339e8dcd01df540
| 1,123
|
py
|
Python
|
src/rainbow.py
|
MukhriddinMike/SomeGames
|
3f347967cfbe61dd3d68e1c7288d4ee3e161387c
|
[
"FSFAP"
] | null | null | null |
src/rainbow.py
|
MukhriddinMike/SomeGames
|
3f347967cfbe61dd3d68e1c7288d4ee3e161387c
|
[
"FSFAP"
] | null | null | null |
src/rainbow.py
|
MukhriddinMike/SomeGames
|
3f347967cfbe61dd3d68e1c7288d4ee3e161387c
|
[
"FSFAP"
] | null | null | null |
# Rainbow, by Al Sweigart al@inventwithpython.com
# Shows a simple rainbow animation.
import time, sys
assert sys.version_info.major == 3, 'Run this program on Python 3.'
try:
import bext
except ImportError:
sys.exit('Bext is required to run this. Run `pip install bext` from the shell to install it.')
indent = 0 # How many spaces to indent.
indentIncreasing = True # Whether the indentation is increasing or not.
while True:
print(' ' * indent, end='')
bext.fg('red')
print('##', end='')
bext.fg('yellow')
print('##', end='')
bext.fg('green')
print('##', end='')
bext.fg('blue')
print('##', end='')
bext.fg('cyan')
print('##', end='')
bext.fg('purple')
print('##')
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
time.sleep(0.05) # Add a slight pause.
| 25.522727
| 98
| 0.581478
|