Column schema (⌀ marks columns that also contain null values):

| Column | Type | Length / range |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 4 – 1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 – 209 |
| max_stars_repo_name | stringlengths | 5 – 121 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 40 |
| max_stars_repo_licenses | listlengths | 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 – 209 |
| max_issues_repo_name | stringlengths | 5 – 121 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 40 |
| max_issues_repo_licenses | listlengths | 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 – 209 |
| max_forks_repo_name | stringlengths | 5 – 121 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 40 |
| max_forks_repo_licenses | listlengths | 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| content | stringlengths | 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
c05529f25572e05b9c041e702b17a514852babb4 | 10,921 | py | Python | app.py | sreelal1/Sentiment-Analysis_NLP | 7c37e6621fd728abd986be806fdbdb1eb80c4fa0 | ["MIT"] | null | null | null | app.py | sreelal1/Sentiment-Analysis_NLP | 7c37e6621fd728abd986be806fdbdb1eb80c4fa0 | ["MIT"] | null | null | null | app.py | sreelal1/Sentiment-Analysis_NLP | 7c37e6621fd728abd986be806fdbdb1eb80c4fa0 | ["MIT"] | null | null | null |
import pickle
import pandas as pd
import numpy as np
import webbrowser
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import plotly
import plotly.express as px
import sqlite3 as sql
conn = sql.connect('Prediction.db')
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
project_name = None
# In[33]:
def load_model():
global pickle_model
file = open("pickle_model.pkl", 'rb')
pickle_model = pickle.load(file)
global vocab
file = open("feature.pkl", 'rb')
vocab = pickle.load(file)
# In[34]:
# def open_browser():
# webbrowser.open_new('https://publicservants.in')
# In[35]:
def check_review(reviewText):
transformer = TfidfTransformer()
loaded_vec = CountVectorizer(decode_error="replace",vocabulary=vocab)
vectorised_review = transformer.fit_transform(loaded_vec.fit_transform([reviewText]))
return pickle_model.predict(vectorised_review)
# In[36]:
# def load_data():
# global df
# df = pd.read_csv('balanced_review.csv')
# df.dropna(inplace = True)
# df = df[df['overall'] != 3]
# df['Positivity'] = np.where(df['overall'] > 3, 1, 0 )
# df['Names'] = np.where(df['Positivity']==1,'Positive','Negative')
# global labels
# labels = df['Names'].tolist()
# In[37]:
def load_scrappeddata():
global df
df=pd.read_sql('SELECT * FROM Predicted', conn)
dfn=df[df['predictedvalue']==0]
dfn=dfn.iloc[:6,:]
dfp=df[df['predictedvalue']==1]
dfp=dfp.iloc[:6,:]
df1=pd.concat([dfp,dfn],ignore_index=True)
global reviews
reviews = []
for i in range(len(df1)):
reviews.append({'label':df1['reviews'][i],'value':i})
# In[38]:
# def predict_scrappeddata():
# global sentiment
# sentiment = []
# for i in range (len(df1['reviews'])):
# response = check_review(df1['reviews'][i])
# if (response[0]==1):
# sentiment.append('Positive')
# elif (response[0] ==0 ):
# sentiment.append('Negative')
# else:
# sentiment.append('Unknown')
# In[57]:
def create_app_ui():
pie_chart=px.pie(
data_frame=df,
values=[df['predictedvalue'].value_counts()[1],df['predictedvalue'].value_counts()[0]],
names=['Positive Reviews','Negative Reviews'],
color=['Positive Reviews','Negative Reviews'],
color_discrete_sequence=['Green','Red'],
#title='Distribution of model prediction of scrapped data',
width=600,
height=380,
hole=0.5,
)
main_layout = html.Div(
[
html.Hr(),
html.H1(id = 'Main_title', children = 'Sentiment analysis with insights',
style={'text-align':'center','color':'red'}),
html.Hr(),
dbc.Row([
dbc.Col(
html.Div([
html.H2(children='Distribution of scraped reviews'),
dcc.Graph(
id='pie_graph',
figure=pie_chart)
],
style={'display': 'inline-block', 'vertical-align': 'top', 'margin-left': '3vw', 'margin-top': '3vw'}
)
),dbc.Col(
html.Div(
[
html.H2(children='Etsy reviews'),
dcc.Dropdown(
id = 'reviewpicker',
options = reviews,
value=None,
optionHeight=70,
style = {'margin-bottom': '30px','min-width':'670px','padding-top':'25px'}
),
dbc.Button(
id="check_review", children='Submit',
color = 'dark',style={'margin':'0 45%','padding':'5px 15px'}
),
html.Div(id='container1',style={'padding-top':'15px'})
],
style={'display': 'inline-block', 'vertical-align': 'top', 'margin-left': '3vw', 'margin-top': '3vw'}),
)]),
dbc.Row([
dbc.Col([
html.Div(
[
html.H2('Try it yourself!'),
dcc.Textarea(
id = 'textarea_review',
placeholder = 'Enter the review here...',
style={'width':'650px','height':'300px'}
),
html.Div(id='container2',style={'padding':'15px 15px 15px 10px'})
],
style={'display': 'inline-block', 'vertical-align': 'top', 'margin-left': '3vw', 'margin-top': '3vw'}
)
]),
dbc.Col([
html.Div(
[
html.Div([
html.H2('Word Cloud'),
dbc.Button("ALL Words",
id="allbt",
outline=True,
color="info",
className="mr-1",
n_clicks_timestamp=0,
style={'padding':'10px','padding-right':'15px'}
),
dbc.Button("Positve Words",
id="posbt",
outline=True,
color="success",
className="mr-1",
n_clicks_timestamp=0,
style={'padding':'10px','padding-right':'15px'}
),
dbc.Button("Negative Words",
id="negbt",
outline=True,
color="danger",
className="mr-1",
n_clicks_timestamp=0,
style={'padding':'10px','padding-right':'15px'}
)
],style={'padding-left':'15px'}
),
html.Div(id='container',style={'padding':'15px'})
],
style={'display': 'inline-block', 'vertical-align': 'top', 'margin-left': '3vw', 'margin-top': '3vw'}
)])
])
],
style={"height": "100vh","background-color": "#d3d3d3" , "width" : "100%"}
)
return main_layout
# In[40]:
@app.callback(
Output('container','children'),
[
Input('allbt','n_clicks_timestamp'),
Input('posbt','n_clicks_timestamp'),
Input('negbt','n_clicks_timestamp'),
]
)
def wordcloudbutton(allbt,posbt,negbt):
if int(allbt) > int(posbt) and int(allbt)>int(negbt):
return html.Div([
html.Img(src=app.get_asset_url('wholeword.png'))])
elif int(posbt) > int(allbt) and int(posbt)>int(negbt):
return html.Div([
html.Img(src=app.get_asset_url('posword.png'))
])
elif int(negbt) > int(allbt) and int(negbt) > int(posbt):
return html.Div([
html.Img(src=app.get_asset_url('negword.png'))
])
else:
pass
# In[41]:
@app.callback(
Output('container2', 'children'),
[
Input('textarea_review', 'value')
]
# ,
# [
# State('textarea_review', 'value')
# ]
)
def review_predict(textarea_value):
# print("Data Type = ", str(type(n_clicks)))
# print("Value = ", str(n_clicks))
# print("Data Type = ", str(type(textarea_value)))
# print("Data Type = ", str(textarea_value))
response = check_review(textarea_value)
#if (n_clicks > 0):
if (response[0] == 0 ):
return html.Div([
dbc.Alert("Its a negative review", color="danger")
])
#result = 'Negative'
elif (response[0] == 1 ):
return html.Div([
dbc.Alert("Its a positive review", color="success")
])
#result = 'Positive'
else:
return ""
#result = 'Unknown'
#return result
# else:
# return ""
# In[42]:
# @app.callback(
# Output('result', 'style'),
# [
# Input('button_review', 'n_clicks')
# ]
# ,
# [
# State('textarea_review', 'value')
# ]
# )
# def review_predict(n_clicks,textarea_value):
# print("Data Type = ", str(type(n_clicks)))
# print("Value = ", str(n_clicks))
# print("Data Type = ", str(type(textarea_value)))
# print("Data Type = ", str(textarea_value))
# response = check_review(textarea_value)
# if (n_clicks > 0):
# if (response[0] == 0 ):
# result = {'color':'red'}
# elif (response[0] == 1 ):
# result = {'color':'green'}
# else:
# result = 'Unknown'
# return result
# else:
# return ""
# In[43]:
@app.callback(
Output('container1','children'),
[
Input('check_review','n_clicks')
],
[
State('reviewpicker','value')
])
def review_predict2(n_clicks,value):
review_selected = reviews[value]['label']
response = check_review(review_selected)
if (n_clicks>0):
if (response[0]==0):
return html.Div([
dbc.Alert("Its a negative review", color="danger")
])
#result = 'Negative'
elif (response[0]==1):
return html.Div([
dbc.Alert("Its a Positive review", color="success")
])
#result = 'Positive'
else:
return ""
#return result
else:
return ""
# In[44]:
# @app.callback(
# Output('result2','style'),
# [
# Input('check_review','n_clicks')
# ],
# [
# State('reviewpicker','value')
# ])
# def review_predict2(n_clicks,value):
# review_selected = reviews[value]['label']
# response = check_review(review_selected)
# if (n_clicks>0):
# if (response[0]==0):
# result = {'color':'red'}
# elif (response[0]==1):
# result = {'color':'green'}
# else:
# result = 'Unknown'
# return result
# else:
# return ""
# In[58]:
def main():
print("Start of my project")
load_model()
#load_data()
load_scrappeddata()
#predict_scrappeddata()
project_name = 'Sentiment Analysis with Insights'
print(project_name)
#open_browser()
app.title = project_name
app.layout = create_app_ui()
app.run_server()
print("End of my Project")
if __name__ == '__main__':
main()
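The heart of check_review() above is re-creating a CountVectorizer from a pickled vocabulary and re-weighting the counts with TfidfTransformer before calling the pickled classifier. Below is a minimal, self-contained sketch of that step (an editor's illustration, not part of the original file; the toy vocabulary and review text are invented stand-ins for feature.pkl and a real review):
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# Stand-in for the vocabulary loaded from feature.pkl (hypothetical values).
toy_vocab = {'good': 0, 'bad': 1, 'product': 2}
vectorizer = CountVectorizer(decode_error="replace", vocabulary=toy_vocab)
counts = vectorizer.fit_transform(["good product, not bad"])   # 1 x 3 sparse count matrix
features = TfidfTransformer().fit_transform(counts)            # TF-IDF weighting of the counts
print(features.toarray())  # the kind of matrix handed to pickle_model.predict()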
| 27.930946 | 115 | 0.492629 |
bea78e2d282f2eb0fe17827635d27f5031851249 | 2,624 | py | Python | preprocessing/util/quadtree.py | UniStuttgart-VISUS/spatiotemporal1d | edff0aab5c8c5b5213f71715f98716b53aa06ac0 | ["Apache-2.0"] | 3 | 2021-06-29T22:41:11.000Z | 2022-01-31T12:57:29.000Z | preprocessing/util/quadtree.py | UniStuttgart-VISUS/spatiotemporal1d | edff0aab5c8c5b5213f71715f98716b53aa06ac0 | ["Apache-2.0"] | 1 | 2021-11-25T09:46:10.000Z | 2021-11-25T11:42:09.000Z | preprocessing/util/quadtree.py | UniStuttgart-VISUS/spatiotemporal1d | edff0aab5c8c5b5213f71715f98716b53aa06ac0 | ["Apache-2.0"] | null | null | null |
import math
from functools import namedtuple
Point = namedtuple('Point', ('x', 'y', 'data'))
class Node:
def __init__(self, x0, y0, x1, y1, datum):
self.x0 = x0
self.x1 = x1
self.y0 = y0
self.y1 = y1
self.datum = datum
self.children = None
class Quadtree:
def __init__(self, x0, y0, x1, y1):
self.root = Node(x0, y0, x1, y1, None)
def add_point(self, p):
if math.isnan(p.x) or math.isnan(p.y) or math.isinf(p.x) or math.isinf(p.y):
import sys, json
sys.stderr.write(F'Invalid coordinates for {p}\n')
json.dump(p.data, sys.stderr, indent=2, default=lambda x: x.__dict__)
sys.exit(1)
try:
_recursive_add_point(self.root, p)
except RecursionError:
import sys, json
sys.stderr.write(F'Recursion Error @ {p}\n')
sys.stderr.write(F' {p.data.name, p.data.lat, p.data.lng}\n')
sys.stderr.write(F' Root {self.root.x0} {self.root.y0} {self.root.x1} {self.root.y1}\n')
sys.stderr.write('Tree:\n')
def rec(node):
if node.datum is not None:
sys.stderr.write(F' {node.datum.data.name}, {node.datum.data.lat}, {node.datum.data.lng}, {node.datum.x}, {node.datum.y}\n')
elif node.children is not None:
for child in node.children:
rec(child)
rec(self.root)
sys.exit(1)
def _recursive_add_point(node, point):
if node.children is None and node.datum is None:
# empty leaf node
node.datum = point
elif node.children is None:
# occupied leaf node
x0 = node.x0
x1 = node.x1
y0 = node.y0
y1 = node.y1
w = x1 - x0
h = y1 - y0
# split
node.children = [
Node(x0, y0, x0 + w/2, y0 + h/2, None),
Node(x0 + w/2, y0, x1, y0 + h/2, None),
Node(x0, y0 + h/2, x0 + w/2, y1, None),
Node(x0 + w/2, y0 + h/2, x1, y1, None)
]
oldchild = node.datum
node.datum = None
# add both points
_recursive_add_point(node, oldchild)
_recursive_add_point(node, point)
else:
# non-leaf node
#
# 0 | 1
# -----
# 2 | 3
#
xm = (node.x1 + node.x0) / 2
ym = (node.y1 + node.y0) / 2
idx = 0
if point.x >= xm:
idx += 1
if point.y >= ym:
idx += 2
_recursive_add_point(node.children[idx], point)
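A short usage sketch of the Quadtree above (an editor's illustration, not part of the original file; the coordinates and payloads are invented):
# Build a unit-square quadtree and insert a few points.
qt = Quadtree(0.0, 0.0, 1.0, 1.0)                     # bounding box (x0, y0, x1, y1)
qt.add_point(Point(0.25, 0.25, {'name': 'a'}))        # first point occupies the root leaf
qt.add_point(Point(0.75, 0.75, {'name': 'b'}))        # second point forces the root to split
qt.add_point(Point(0.80, 0.20, {'name': 'c'}))        # lands in child 1 (x >= xm, y < ym)
print(qt.root.children is not None)                   # True: the root is now an internal node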
| 26.77551 | 145 | 0.498476 |
8448fe74ac50d42b44d2fac76f5fad759cf26bb5 | 198 | py | Python | lessons/lesson3/t201.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | ["MIT"] | null | null | null | lessons/lesson3/t201.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | ["MIT"] | null | null | null | lessons/lesson3/t201.py | kcfkwok2003/Simp_py | f75e66da01b45dc8688dda602f8b33d4258f0c31 | ["MIT"] | null | null | null |
# t201.py
import time
from machine import Pin
from simp_py import tft
p21 = Pin(21, Pin.IN)
while True:
    if p21.value() == 0:
        tft.tft.text(0, 100, " on")
    else:
        tft.tft.text(0, 100, "off")
    time.sleep(0.1)
| 18 | 29 | 0.641414 |
0a9b5f450053fa6c990011c706e8320dd1c870a9 | 8,960 | py | Python | pipenv/patched/notpip/_internal/utils/unpacking.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | ["MIT"] | 23 | 2017-01-20T01:18:31.000Z | 2017-01-20T17:25:11.000Z | pipenv/patched/notpip/_internal/utils/unpacking.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | ["MIT"] | 1 | 2017-01-20T05:13:58.000Z | 2017-01-20T05:13:58.000Z | pipenv/patched/notpip/_internal/utils/unpacking.py | sthagen/pipenv | 0924f75fd1004c848ea67d4272315eda4210b352 | ["MIT"] | null | null | null |
"""Utilities related archives.
"""
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from typing import Iterable, List, Optional
from zipfile import ZipInfo
from pipenv.patched.notpip._internal.exceptions import InstallationError
from pipenv.patched.notpip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pipenv.patched.notpip._internal.utils.misc import ensure_dir
logger = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug("bz2 module is not available")
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug("lzma module is not available")
def current_umask() -> int:
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def split_leading_dir(path: str) -> List[str]:
path = path.lstrip("/").lstrip("\\")
if "/" in path and (
("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
):
return path.split("/", 1)
elif "\\" in path:
return path.split("\\", 1)
else:
return [path, ""]
def has_leading_dir(paths: Iterable[str]) -> bool:
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
def is_within_directory(directory: str, target: str) -> bool:
"""
Return true if the absolute path of target is within the directory
"""
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def set_extracted_file_to_default_mode_plus_executable(path: str) -> None:
"""
Make file present at path have execute for user/group/world
(chmod +x) is no-op on windows per python docs
"""
os.chmod(path, (0o777 & ~current_umask() | 0o111))
def zip_item_is_executable(info: ZipInfo) -> bool:
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
def unzip_file(filename: str, location: str, flatten: bool = True) -> None:
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, "rb")
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not is_within_directory(location, fn):
message = (
"The zip file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, fn, location))
if fn.endswith("/") or fn.endswith("\\"):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
if zip_item_is_executable(info):
set_extracted_file_to_default_mode_plus_executable(fn)
finally:
zipfp.close()
def untar_file(filename: str, location: str) -> None:
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
mode = "r:gz"
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = "r:bz2"
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = "r:xz"
elif filename.lower().endswith(".tar"):
mode = "r"
else:
logger.warning(
"Cannot determine compression type for file %s",
filename,
)
mode = "r:*"
tar = tarfile.open(filename, mode, encoding="utf-8")
try:
leading = has_leading_dir([member.name for member in tar.getmembers()])
for member in tar.getmembers():
fn = member.name
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
"The tar file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, path, location))
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
ensure_dir(os.path.dirname(path))
assert fp is not None
with open(path, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
set_extracted_file_to_default_mode_plus_executable(path)
finally:
tar.close()
def unpack_file(
filename: str,
location: str,
content_type: Optional[str] = None,
) -> None:
filename = os.path.realpath(filename)
if (
content_type == "application/zip"
or filename.lower().endswith(ZIP_EXTENSIONS)
or zipfile.is_zipfile(filename)
):
unzip_file(filename, location, flatten=not filename.endswith(".whl"))
elif (
content_type == "application/x-gzip"
or tarfile.is_tarfile(filename)
or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
):
untar_file(filename, location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
"Cannot unpack file %s (downloaded from %s, content-type: %s); "
"cannot detect archive format",
filename,
location,
content_type,
)
raise InstallationError(f"Cannot determine archive format of {location}")
| 34.594595 | 85 | 0.579911 |
c7dedc3412466e3344ec5f98c60dd41f4b02ca92 | 1,576 | py | Python | server/db/test.py | e7/treasure-box | 1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8 | ["Apache-2.0"] | null | null | null | server/db/test.py | e7/treasure-box | 1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8 | ["Apache-2.0"] | null | null | null | server/db/test.py | e7/treasure-box | 1ad4818e7535b80a3ad3e0cb74087a2ce43a9dc8 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
# -*- coding:utf-8 -*-
import socket
import errno
import struct
import time
import json
if "__main__" == __name__:
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
skt.connect(("127.0.0.1", 8889))
'''
context = json.dumps({"interface":"insert", "email":"jackzxty@126.com"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
context = json.dumps({"interface":"update", "uid":"1", "email":"chg"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
context = json.dumps({"interface":"delete", "uid":"16"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
'''
context = json.dumps({"interface":"select", "uid":"26"})
data = struct.pack("!4I", 1000, 16, len(context), 0)
skt.sendall(data + context)
rsp = skt.recv(4096)
version, start, length, checksum = struct.unpack("!4I", rsp[0:16])
print version, start, length, checksum
print(json.loads(rsp[16:]))
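Each request above is a 16-byte header packed as "!4I" (which appears to be version, header size, body length, checksum) followed by a JSON body. A small Python 3 round-trip sketch of just that framing (an editor's illustration; the values mirror the script above):
import json
import struct
body = json.dumps({"interface": "select", "uid": "26"}).encode()
header = struct.pack("!4I", 1000, 16, len(body), 0)    # version, header size, body length, checksum
frame = header + body
version, start, length, checksum = struct.unpack("!4I", frame[:16])
print(version, start, length, checksum)                # 1000 16 <body length> 0
print(json.loads(frame[start:start + length]))         # {'interface': 'select', 'uid': '26'}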
| 32.163265 | 76 | 0.625 |
e15709a1dd8c9b23dacaffe350652984a43de7df | 213 | py | Python | programming_language_manuals/Python/python3tutorial/tutor.9.3.4.py | darbinreyes/subparprogrammer | e3588adc494a69a564fe0a9859ff296fc710eab0 | ["MIT"] | null | null | null | programming_language_manuals/Python/python3tutorial/tutor.9.3.4.py | darbinreyes/subparprogrammer | e3588adc494a69a564fe0a9859ff296fc710eab0 | ["MIT"] | 1 | 2021-05-11T22:20:01.000Z | 2021-05-11T22:20:01.000Z | programming_language_manuals/Python/python3tutorial/tutor.9.3.4.py | darbinreyes/subparprogrammer | e3588adc494a69a564fe0a9859ff296fc710eab0 | ["MIT"] | null | null | null |
class MyClass:
"""A simple example class"""
i = 12345
def f(self):
return 'hello world'
# Error ? ANS: Yes. You need to pass in self.
# MyClass.f()
x = MyClass()
print(MyClass.f(x)) # Works
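As the comment above notes, the usual way to call f is through the instance, which supplies self automatically; the call below is equivalent (editor's note):
print(x.f())           # same as MyClass.f(x): x is passed in as self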
| 16.384615 | 45 | 0.591549 |
b6062905b256433cd15dfc069709422aecfcbfdc | 6,307 | py | Python | .github/scripts/core_checker.py | nemausa/FreeRTOS | 096f8e27f8016acca10f7bc5ba8efab3817347ce | ["MIT"] | null | null | null | .github/scripts/core_checker.py | nemausa/FreeRTOS | 096f8e27f8016acca10f7bc5ba8efab3817347ce | ["MIT"] | null | null | null | .github/scripts/core_checker.py | nemausa/FreeRTOS | 096f8e27f8016acca10f7bc5ba8efab3817347ce | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# python >= 3.4
import os
from common.header_checker import HeaderChecker
#--------------------------------------------------------------------------------------------------
# CONFIG
#--------------------------------------------------------------------------------------------------
FREERTOS_IGNORED_EXTENSIONS = [
'.1',
'.ASM',
'.C',
'.DSW',
'.G_C',
'.H',
'.Hbp',
'.IDE',
'.LIB',
'.Opt',
'.PC',
'.PRM',
'.TXT',
'.URL',
'.UVL',
'.Uv2',
'.a',
'.ac',
'.am',
'.atsln',
'.atstart',
'.atsuo',
'.bash',
'.bat',
'.bbl',
'.bit',
'.board',
'.bsb',
'.bsdl',
'.bts',
'.ccxml',
'.cdkproj',
'.cdkws',
'.cfg',
'.cgp',
'.checksum',
'.cmake',
'.cmd',
'.config',
'.cpp',
'.cproj',
'.cproject',
'.crun',
'.css',
'.csv',
'.custom_argvars',
'.cxx',
'.cydwr',
'.cyprj',
'.cysch',
'.dat',
'.datas',
'.db',
'.dbgdt',
'.dep',
'.dni',
'.dnx',
'.doc',
'.dox',
'.doxygen',
'.ds',
'.dsk',
'.dtd',
'.dts',
'.elf',
'.emProject',
'.env_conf',
'.ewd',
'.ewp',
'.ewt',
'.eww',
'.exe',
'.filters',
'.flash',
'.fmt',
'.ftl',
'.gdb',
'.gif',
'.gise',
'.gld',
'.gpdsc',
'.gui',
'.h_from_toolchain',
'.hdf',
'.hdp',
'.hex',
'.hist',
'.history',
'.hsf',
'.htm',
'.html',
'.hwc',
'.hwl',
'.hwp',
'.hws',
'.hzp',
'.hzs',
'.i',
'.icf',
'.ide',
'.idx',
'.in',
'.inc',
'.include',
'.index',
'.inf',
'.ini',
'.init',
'.ipcf',
'.ise',
'.jlink',
'.json',
'.la',
'.launch',
'.lcf',
'.ld',
'.lds',
'.lib',
'.lk1',
'.lkr',
'.lm',
'.lo',
'.lock',
'.lsl',
'.lst',
'.m4',
'.mac',
'.make',
'.map',
'.mbt',
'.mcp',
'.mcpar',
'.mcs',
'.mcw',
'.md',
'.mdm',
'.mem',
'.mhs',
'.mk',
'.mk1',
'.mmi',
'.mrt',
'.mss',
'.mtpj',
'.nav',
'.ntrc_log',
'.opa',
'.opb',
'.opc',
'.opl',
'.opt',
'.opv',
'.out',
'.pack',
'.par',
'.patch',
'.pbd',
'.pdsc',
'.pe',
'.pem',
'.pgs',
'.pl',
'.plg',
'.png',
'.prc',
'.pref',
'.prefs',
'.prj',
'.project',
'.properties',
'.ps1',
'.ptf',
'.py',
'.r79',
'.rapp',
'.rc',
'.reggroups',
'.reglist',
'.resc',
'.resources',
'.rom',
'.rprj',
'.s79',
'.s82',
'.s90',
'.sc',
'.scf',
'.scfg',
'.script',
'.sct',
'.scvd',
'.session',
'.sfr',
'.sh',
'.shtml',
'.sig',
'.sln',
'.spec',
'.sprj',
'.stf',
'.stg',
'.suo',
'.sup',
'.svg',
'.tags',
'.tcl',
'.tdt',
'.template',
'.tgt',
'.tps',
'.tra',
'.tree',
'.tws',
'.txt',
'.ucf',
'.url',
'.user',
'.ut',
'.uvmpw',
'.uvopt',
'.uvoptx',
'.uvproj',
'.uvprojx',
'.vcproj',
'.vcxproj',
'.version',
'.webserver',
'.wpj',
'.wsdt',
'.wsp',
'.wspos',
'.wsx',
'.x',
'.xbcd',
'.xcl',
'.xise',
'.xml',
'.xmp',
'.xmsgs',
'.xsl',
'.yml',
'.zip'
]
FREERTOS_IGNORED_PATTERNS = [
r'.*\.git.*',
r'.*mbedtls_config\.h.*',
r'.*CMSIS.*',
r'.*/Nordic_Code/*',
r'.*/ST_Code/*',
r'.*/makefile',
r'.*/Makefile',
r'.*/printf-stdarg\.c.*',
r'.*/startup.*',
r'.*/Startup.*',
r'.*/trcConfig\.h.*',
r'.*/trcConfig\.c.*',
r'.*/trcSnapshotConfig\.h.*',
r'.*/MicroZed_hw_platform.*'
]
FREERTOS_IGNORED_FILES = [
'.cproject',
'.project',
'fyi-another-way-to-ignore-file.txt',
'mbedtls_config.h',
'requirements.txt',
'run-cbmc-proofs.py',
'.editorconfig',
'lcovrc',
'htif.c', 'htif.h',
'ethernetif.c',
'platform.c',
'platform.h',
'platform_config.h',
'FreeRTOS_asm_vectors.S',
'interrupt_vector.s',
'gdbinit'
]
FREERTOS_HEADER = [
'/*\n',
' * FreeRTOS V202112.00\n',
' * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n',
' *\n',
' * Permission is hereby granted, free of charge, to any person obtaining a copy of\n',
' * this software and associated documentation files (the "Software"), to deal in\n',
' * the Software without restriction, including without limitation the rights to\n',
' * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n',
' * the Software, and to permit persons to whom the Software is furnished to do so,\n',
' * subject to the following conditions:\n',
' *\n',
' * The above copyright notice and this permission notice shall be included in all\n',
' * copies or substantial portions of the Software.\n',
' *\n',
' * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n',
' * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n',
' * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n',
' * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n',
' * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n',
' * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n',
' *\n',
' * https://www.FreeRTOS.org\n',
' * https://github.com/FreeRTOS\n',
' *\n',
' */\n',
]
def main():
parser = HeaderChecker.configArgParser()
args = parser.parse_args()
# Configure the checks then run
checker = HeaderChecker(FREERTOS_HEADER)
checker.ignoreExtension(*FREERTOS_IGNORED_EXTENSIONS)
checker.ignorePattern(*FREERTOS_IGNORED_PATTERNS)
checker.ignoreFile(*FREERTOS_IGNORED_FILES)
checker.ignoreFile(os.path.split(__file__)[-1])
rc = checker.processArgs(args)
if rc:
checker.showHelp(__file__)
return rc
if __name__ == '__main__':
exit(main())
| 18.495601 | 99 | 0.445378 |
f460c70dde0f8790a5d4bbe8c8f48f595e84a839 | 65 | py | Python | app/engine/graphics/ui_framework/premade_animations/__init__.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | ["MIT"] | null | null | null | app/engine/graphics/ui_framework/premade_animations/__init__.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | ["MIT"] | null | null | null | app/engine/graphics/ui_framework/premade_animations/__init__.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | ["MIT"] | null | null | null |
from .animation_templates import *
from .text_animations import *
| 32.5 | 34 | 0.830769 |
1895ccab21dea2d007321d110459dd8edb832bee | 40,061 | py | Python | nova/tests/functional/test_server_group.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | ["Apache-2.0"] | null | null | null | nova/tests/functional/test_server_group.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | ["Apache-2.0"] | null | null | null | nova/tests/functional/test_server_group.py | panguan737/nova | 0d177185a439baa228b42c948cab4e934d6ac7b8 | ["Apache-2.0"] | 1 | 2020-11-02T10:17:13.000Z | 2020-11-02T10:17:13.000Z |
# Copyright 2015 Ericsson AB
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from oslo_config import cfg
from nova import context
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_network
from nova.tests.unit import policy_fixture
from nova.virt import fake
import nova.scheduler.utils
import nova.servicegroup
import nova.tests.unit.image.fake
# An alternate project id
PROJECT_ID_ALT = "616c6c796f7572626173656172656f73"
CONF = cfg.CONF
class ServerGroupTestBase(test.TestCase,
integrated_helpers.InstanceHelperMixin):
REQUIRES_LOCKING = True
api_major_version = 'v2.1'
microversion = None
_enabled_filters = (CONF.filter_scheduler.enabled_filters
+ ['ServerGroupAntiAffinityFilter',
'ServerGroupAffinityFilter'])
# Override servicegroup parameters to make the tests run faster
_service_down_time = 10
_report_interval = 1
anti_affinity = {'name': 'fake-name-1', 'policies': ['anti-affinity']}
affinity = {'name': 'fake-name-2', 'policies': ['affinity']}
def _get_weight_classes(self):
return []
def setUp(self):
super(ServerGroupTestBase, self).setUp()
self.flags(enabled_filters=self._enabled_filters,
group='filter_scheduler')
# NOTE(sbauza): Don't verify VCPUS and disks given the current nodes.
self.flags(cpu_allocation_ratio=9999.0)
self.flags(disk_allocation_ratio=9999.0)
self.flags(weight_classes=self._get_weight_classes(),
group='filter_scheduler')
self.flags(service_down_time=self._service_down_time)
self.flags(report_interval=self._report_interval)
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
self.api.microversion = self.microversion
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.start_service('scheduler')
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def _boot_a_server_to_group(self, group,
expected_status='ACTIVE', flavor=None,
az=None):
server = self._build_minimal_create_server_request(self.api,
'some-server',
az=az)
if flavor:
server['flavorRef'] = ('http://fake.server/%s'
% flavor['id'])
post = {'server': server,
'os:scheduler_hints': {'group': group['id']}}
created_server = self.api.post_server(post)
self.assertTrue(created_server['id'])
# Wait for it to finish being created
found_server = self._wait_for_state_change(
self.admin_api, created_server, expected_status)
return found_server
class ServerGroupFakeDriver(fake.SmallFakeDriver):
"""A specific fake driver for our tests.
Here, we only want to be RAM-bound.
"""
vcpus = 1000
memory_mb = 8192
local_gb = 100000
# A fake way to change the FakeDriver given we don't have a possibility yet to
# modify the resources for the FakeDriver
def _fake_load_compute_driver(virtapi, compute_driver=None):
return ServerGroupFakeDriver(virtapi)
class ServerGroupTestV21(ServerGroupTestBase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
# TODO(sbauza): Remove that once there is a way to have a custom
# FakeDriver supporting different resources. Note that we can't also
# simply change the config option for choosing our custom fake driver
# as the mocked method only accepts to load drivers in the nova.virt
# tree.
self.stub_out('nova.virt.driver.load_compute_driver',
_fake_load_compute_driver)
fake.set_nodes(['compute'])
self.compute = self.start_service('compute', host='compute')
# NOTE(gibi): start a second compute host to be able to test affinity
# NOTE(sbauza): Make sure the FakeDriver returns a different nodename
# for the second compute node.
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2')
fake_network.set_stub_network_methods(self)
def test_get_no_groups(self):
groups = self.api.get_server_groups()
self.assertEqual([], groups)
def test_create_and_delete_groups(self):
groups = [self.anti_affinity,
self.affinity]
created_groups = []
for group in groups:
created_group = self.api.post_server_groups(group)
created_groups.append(created_group)
self.assertEqual(group['name'], created_group['name'])
self.assertEqual(group['policies'], created_group['policies'])
self.assertEqual([], created_group['members'])
self.assertEqual({}, created_group['metadata'])
self.assertIn('id', created_group)
group_details = self.api.get_server_group(created_group['id'])
self.assertEqual(created_group, group_details)
existing_groups = self.api.get_server_groups()
self.assertIn(created_group, existing_groups)
existing_groups = self.api.get_server_groups()
self.assertEqual(len(groups), len(existing_groups))
for group in created_groups:
self.api.delete_server_group(group['id'])
existing_groups = self.api.get_server_groups()
self.assertNotIn(group, existing_groups)
def test_create_wrong_policy(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{'name': 'fake-name-1',
'policies': ['wrong-policy']})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
self.assertIn('wrong-policy', ex.response.text)
def test_get_groups_all_projects(self):
# This test requires APIs using two projects.
# Create an API using project 'openstack1'.
# This is a non-admin API.
#
# NOTE(sdague): this is actually very much *not* how this
# fixture should be used. This actually spawns a whole
# additional API server. Should be addressed in the future.
api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
api_version=self.api_major_version,
project_id=PROJECT_ID_ALT)).api
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
# Project 'openstack' is used by self.api
group1 = self.anti_affinity
openstack_group = self.api.post_server_groups(group1)
# Create a server group in project 'openstack1'
group2 = self.affinity
openstack1_group = api_openstack1.post_server_groups(group2)
# The admin should be able to get server groups in all projects.
all_projects_admin = self.admin_api.get_server_groups(
all_projects=True)
self.assertIn(openstack_group, all_projects_admin)
self.assertIn(openstack1_group, all_projects_admin)
# The non-admin should only be able to get server groups
# in his project.
# The all_projects parameter is ignored for non-admin clients.
all_projects_non_admin = api_openstack1.get_server_groups(
all_projects=True)
self.assertNotIn(openstack_group, all_projects_non_admin)
self.assertIn(openstack1_group, all_projects_non_admin)
def test_create_duplicated_policy(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{"name": "fake-name-1",
"policies": ["affinity", "affinity"]})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
def test_create_multiple_policies(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{"name": "fake-name-1",
"policies": ["anti-affinity", "affinity"]})
self.assertEqual(400, ex.response.status_code)
def _boot_servers_to_group(self, group, flavor=None):
servers = []
for _ in range(0, 2):
server = self._boot_a_server_to_group(group,
flavor=flavor)
servers.append(server)
return servers
def test_boot_servers_with_affinity(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
host = servers[0]['OS-EXT-SRV-ATTR:host']
for server in servers:
self.assertIn(server['id'], members)
self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_affinity_overquota(self):
# Tests that we check server group member quotas and cleanup created
# resources when we fail with OverQuota.
self.flags(server_group_members=1, group='quota')
# make sure we start with 0 servers
servers = self.api.get_servers(detail=False)
self.assertEqual(0, len(servers))
created_group = self.api.post_server_groups(self.affinity)
ex = self.assertRaises(client.OpenStackApiException,
self._boot_servers_to_group,
created_group)
self.assertEqual(403, ex.response.status_code)
# _boot_servers_to_group creates 2 instances in the group in order, not
# multiple servers in a single request. Since our quota is 1, the first
# server create would pass, the second should fail, and we should be
# left with 1 server and it's 1 block device mapping.
servers = self.api.get_servers(detail=False)
self.assertEqual(1, len(servers))
ctxt = context.get_admin_context()
servers = db.instance_get_all(ctxt)
self.assertEqual(1, len(servers))
ctxt_mgr = db_api.get_context_manager(ctxt)
with ctxt_mgr.reader.using(ctxt):
bdms = db_api._block_device_mapping_get_query(ctxt).all()
self.assertEqual(1, len(bdms))
self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
def test_boot_servers_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
# Using big enough flavor to use up the resources on the host
flavor = self.api.get_flavors()[2]
self._boot_servers_to_group(created_group, flavor=flavor)
# The third server cannot be booted as there is not enough resource
# on the host where the first two server was booted
failed_server = self._boot_a_server_to_group(created_group,
flavor=flavor,
expected_status='ERROR')
self.assertEqual('No valid host was found. '
'There are not enough hosts available.',
failed_server['fault']['message'])
def test_boot_servers_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
for server in servers:
self.assertIn(server['id'], members)
def test_boot_server_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
self._boot_servers_to_group(created_group)
# We have 2 computes so the third server won't fit into the same group
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('No valid host was found. '
'There are not enough hosts available.',
failed_server['fault']['message'])
def _rebuild_with_group(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'rebuild': {'imageRef':
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}}
self.api.post_server_action(servers[1]['id'], post)
rebuilt_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
self.assertEqual(post['rebuild']['imageRef'],
rebuilt_server.get('image')['id'])
return [servers[0], rebuilt_server]
def test_rebuild_with_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.affinity)
self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_anti_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.anti_affinity)
self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def _migrate_with_group_no_valid_host(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
ex = self.assertRaises(client.OpenStackApiException,
self.admin_api.post_server_action,
servers[1]['id'], post)
self.assertEqual(400, ex.response.status_code)
self.assertIn('No valid host found for cold migrate', ex.response.text)
def test_migrate_with_group_no_valid_host(self):
for group in [self.affinity, self.anti_affinity]:
self._migrate_with_group_no_valid_host(group)
def test_migrate_with_anti_affinity(self):
# Start additional host to test migration with anti-affinity
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
migrated_server['OS-EXT-SRV-ATTR:host'])
def test_resize_to_same_host_with_anti_affinity(self):
self.flags(allow_resize_to_same_host=True)
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group,
flavor=self.api.get_flavors()[0])
post = {'resize': {'flavorRef': '2'}}
server1_old_host = servers[1]['OS-EXT-SRV-ATTR:host']
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
self.assertEqual(server1_old_host,
migrated_server['OS-EXT-SRV-ATTR:host'])
def _get_compute_service_by_host_name(self, host_name):
host = None
if self.compute.host == host_name:
host = self.compute
elif self.compute2.host == host_name:
host = self.compute2
else:
raise AssertionError('host = %s does not found in '
'existing hosts %s' %
(host_name, str([self.compute.host,
self.compute2.host])))
return host
def test_evacuate_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
self.start_service('compute', host='host3')
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
# check that anti-affinity policy is kept during evacuation
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {'onSharedStorage': False}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_soft_affinity_not_supported(self):
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_groups,
{'name': 'fake-name-1',
'policies': ['soft-affinity']})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Invalid input', ex.response.text)
self.assertIn('soft-affinity', ex.response.text)
class ServerGroupAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
# Load only anti-affinity filter so affinity will be missing
_enabled_filters = ['ServerGroupAntiAffinityFilter']
@mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None)
def test_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupAffinityFilter not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupAntiAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
# Load only affinity filter so anti-affinity will be missing
_enabled_filters = ['ServerGroupAffinityFilter']
@mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None)
def test_anti_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.anti_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupAntiAffinityFilter not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
microversion = '2.15'
soft_affinity = {'name': 'fake-name-4',
'policies': ['soft-affinity']}
def _get_weight_classes(self):
# Load only soft-anti-affinity weigher so affinity will be missing
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAntiAffinityWeigher']
@mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None)
def test_soft_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.soft_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupSoftAffinityWeigher not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase):
api_major_version = 'v2.1'
microversion = '2.15'
soft_anti_affinity = {'name': 'fake-name-3',
'policies': ['soft-anti-affinity']}
def _get_weight_classes(self):
# Load only soft affinity filter so anti-affinity will be missing
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAffinityWeigher']
@mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None)
def test_soft_anti_affinity_no_filter(self):
created_group = self.api.post_server_groups(self.soft_anti_affinity)
failed_server = self._boot_a_server_to_group(created_group,
expected_status='ERROR')
self.assertEqual('ServerGroup policy is not supported: '
'ServerGroupSoftAntiAffinityWeigher not configured',
failed_server['fault']['message'])
self.assertEqual(400, failed_server['fault']['code'])
class ServerGroupTestV215(ServerGroupTestV21):
api_major_version = 'v2.1'
microversion = '2.15'
soft_anti_affinity = {'name': 'fake-name-3',
'policies': ['soft-anti-affinity']}
soft_affinity = {'name': 'fake-name-4',
'policies': ['soft-affinity']}
def setUp(self):
super(ServerGroupTestV215, self).setUp()
soft_affinity_patcher = mock.patch(
'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY')
soft_anti_affinity_patcher = mock.patch(
'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY')
self.addCleanup(soft_affinity_patcher.stop)
self.addCleanup(soft_anti_affinity_patcher.stop)
self.mock_soft_affinity = soft_affinity_patcher.start()
self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start()
self.mock_soft_affinity.return_value = None
self.mock_soft_anti_affinity.return_value = None
def _get_weight_classes(self):
return ['nova.scheduler.weights.affinity.'
'ServerGroupSoftAffinityWeigher',
'nova.scheduler.weights.affinity.'
'ServerGroupSoftAntiAffinityWeigher']
def test_evacuate_with_anti_affinity(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
# Start additional host to test evacuation
fake.set_nodes(['host3'])
compute3 = self.start_service('compute', host='host3')
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
# check that policy is kept
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host'])
compute3.kill()
host.start()
def test_evacuate_with_anti_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.anti_affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_evacuate_with_affinity_no_valid_host(self):
created_group = self.api.post_server_groups(self.affinity)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['error'])
server_after_failed_evac = self._wait_for_state_change(
self.admin_api, servers[1], 'ERROR')
# assert that after a failed evac the server active on the same host
# as before
self.assertEqual(server_after_failed_evac['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
host.start()
def test_create_and_delete_groups(self):
groups = [self.anti_affinity,
self.affinity,
self.soft_affinity,
self.soft_anti_affinity]
created_groups = []
for group in groups:
created_group = self.api.post_server_groups(group)
created_groups.append(created_group)
self.assertEqual(group['name'], created_group['name'])
self.assertEqual(group['policies'], created_group['policies'])
self.assertEqual([], created_group['members'])
self.assertEqual({}, created_group['metadata'])
self.assertIn('id', created_group)
group_details = self.api.get_server_group(created_group['id'])
self.assertEqual(created_group, group_details)
existing_groups = self.api.get_server_groups()
self.assertIn(created_group, existing_groups)
existing_groups = self.api.get_server_groups()
self.assertEqual(len(groups), len(existing_groups))
for group in created_groups:
self.api.delete_server_group(group['id'])
existing_groups = self.api.get_server_groups()
self.assertNotIn(group, existing_groups)
def test_boot_servers_with_soft_affinity(self):
created_group = self.api.post_server_groups(self.soft_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertEqual(2, len(servers))
self.assertIn(servers[0]['id'], members)
self.assertIn(servers[1]['id'], members)
self.assertEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_soft_affinity_no_resource_on_first_host(self):
created_group = self.api.post_server_groups(self.soft_affinity)
# Using big enough flavor to use up the resources on the first host
flavor = self.api.get_flavors()[2]
servers = self._boot_servers_to_group(created_group, flavor)
# The third server cannot be booted on the first host as there
# is not enough resource there, but as opposed to the affinity policy
# it will be booted on the other host, which has enough resources.
third_server = self._boot_a_server_to_group(created_group,
flavor=flavor)
members = self.api.get_server_group(created_group['id'])['members']
hosts = []
for server in servers:
hosts.append(server['OS-EXT-SRV-ATTR:host'])
self.assertIn(third_server['id'], members)
self.assertNotIn(third_server['OS-EXT-SRV-ATTR:host'], hosts)
def test_boot_servers_with_soft_anti_affinity(self):
created_group = self.api.post_server_groups(self.soft_anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
self.assertEqual(2, len(servers))
self.assertIn(servers[0]['id'], members)
self.assertIn(servers[1]['id'], members)
self.assertNotEqual(servers[0]['OS-EXT-SRV-ATTR:host'],
servers[1]['OS-EXT-SRV-ATTR:host'])
def test_boot_servers_with_soft_anti_affinity_one_available_host(self):
self.compute2.kill()
created_group = self.api.post_server_groups(self.soft_anti_affinity)
servers = self._boot_servers_to_group(created_group)
members = self.api.get_server_group(created_group['id'])['members']
host = servers[0]['OS-EXT-SRV-ATTR:host']
for server in servers:
self.assertIn(server['id'], members)
self.assertEqual(host, server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_soft_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.soft_affinity)
self.assertEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def test_rebuild_with_soft_anti_affinity(self):
untouched_server, rebuilt_server = self._rebuild_with_group(
self.soft_anti_affinity)
self.assertNotEqual(untouched_server['OS-EXT-SRV-ATTR:host'],
rebuilt_server['OS-EXT-SRV-ATTR:host'])
def _migrate_with_soft_affinity_policies(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
post = {'migrate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
migrated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'VERIFY_RESIZE')
return [migrated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host']]
def test_migrate_with_soft_affinity(self):
migrated_server, other_server = (
self._migrate_with_soft_affinity_policies(self.soft_affinity))
self.assertNotEqual(migrated_server, other_server)
def test_migrate_with_soft_anti_affinity(self):
migrated_server, other_server = (
self._migrate_with_soft_affinity_policies(self.soft_anti_affinity))
self.assertEqual(migrated_server, other_server)
def _evacuate_with_soft_anti_affinity_policies(self, group):
created_group = self.api.post_server_groups(group)
servers = self._boot_servers_to_group(created_group)
host = self._get_compute_service_by_host_name(
servers[1]['OS-EXT-SRV-ATTR:host'])
host.stop()
# Need to wait service_down_time amount of seconds to ensure
# nova considers the host down
time.sleep(self._service_down_time)
post = {'evacuate': {}}
self.admin_api.post_server_action(servers[1]['id'], post)
self._wait_for_migration_status(servers[1], ['done'])
evacuated_server = self._wait_for_state_change(
self.admin_api, servers[1], 'ACTIVE')
        # Note(gibi): need to get the server again, as the state of the
        # instance goes to ACTIVE first and only afterwards does the host of
        # the instance change to the new host
evacuated_server = self.admin_api.get_server(evacuated_server['id'])
host.start()
return [evacuated_server['OS-EXT-SRV-ATTR:host'],
servers[0]['OS-EXT-SRV-ATTR:host']]
def test_evacuate_with_soft_affinity(self):
evacuated_server, other_server = (
self._evacuate_with_soft_anti_affinity_policies(
self.soft_affinity))
self.assertNotEqual(evacuated_server, other_server)
def test_evacuate_with_soft_anti_affinity(self):
evacuated_server, other_server = (
self._evacuate_with_soft_anti_affinity_policies(
self.soft_anti_affinity))
self.assertEqual(evacuated_server, other_server)
def test_soft_affinity_not_supported(self):
pass
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
def setUp(self):
super(ServerGroupTestMultiCell, self).setUp()
# Start two compute services, one per cell
fake.set_nodes(['host1'])
self.addCleanup(fake.restore_nodes)
self.compute1 = self.start_service('compute', host='host1',
cell='cell1')
fake.set_nodes(['host2'])
self.addCleanup(fake.restore_nodes)
self.compute2 = self.start_service('compute', host='host2',
cell='cell2')
# This is needed to avoid a NetworkAmbiguous error during
# _validate_requested_port_ids in allocate_for_instance.
fake_network.set_stub_network_methods(self)
# This is needed to find a server that is still booting with multiple
# cells, while waiting for the state change to ACTIVE. See the
# _get_instance method in the compute/api for details.
self.useFixture(nova_fixtures.AllServicesCurrent())
self.aggregates = {}
def _create_aggregate(self, name):
agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
self.aggregates[name] = agg
def _add_host_to_aggregate(self, agg, host):
"""Add a compute host to nova aggregates.
:param agg: Name of the nova aggregate
:param host: Name of the compute host
"""
agg = self.aggregates[agg]
self.admin_api.add_host_to_aggregate(agg['id'], host)
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'availability_zone': az,
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def test_boot_servers_with_affinity(self):
# Create a server group for affinity
# As of microversion 2.64, a single policy must be specified when
# creating a server group.
created_group = self.api.post_server_groups(self.affinity)
# Create aggregates for cell1 and cell2
self._create_aggregate('agg1_cell1')
self._create_aggregate('agg2_cell2')
# Add each cell to a separate aggregate
self._add_host_to_aggregate('agg1_cell1', 'host1')
self._add_host_to_aggregate('agg2_cell2', 'host2')
# Set each cell to a separate availability zone
self._set_az_aggregate('agg1_cell1', 'cell1')
self._set_az_aggregate('agg2_cell2', 'cell2')
# Boot a server to cell2 with the affinity policy. Order matters here
# because the CellDatabases fixture defaults the local cell database to
# cell1. So boot the server to cell2 where the group member cannot be
# found as a result of the default setting.
self._boot_a_server_to_group(created_group, az='cell2')
# Boot a server to cell1 with the affinity policy. This should fail
# because group members found in cell2 should violate the policy.
self._boot_a_server_to_group(created_group, az='cell1',
expected_status='ERROR')
| 43.122713
| 79
| 0.64574
|
2fec9d09a2b708d6dbd67453ee97c2358c723a39
| 3,130
|
py
|
Python
|
pymake/processpoolexecutor.py
|
CallumJHays/pymake
|
01dfed42e20cd05d4843425633b7c17828f924c4
|
[
"MIT"
] | 1
|
2021-09-08T09:30:05.000Z
|
2021-09-08T09:30:05.000Z
|
pymake/processpoolexecutor.py
|
CallumJHays/pymake
|
01dfed42e20cd05d4843425633b7c17828f924c4
|
[
"MIT"
] | null | null | null |
pymake/processpoolexecutor.py
|
CallumJHays/pymake
|
01dfed42e20cd05d4843425633b7c17828f924c4
|
[
"MIT"
] | null | null | null |
"""Main module."""
import pickle
from multiprocessing.pool import MapResult
from typing import Any, Callable, Optional, TypeVar, Iterator, Union
# dill provides the richer serialization used by the multiprocess fork of
# multiprocessing imported below
import dill # type: ignore
import multiprocess.pool # type: ignore
from types import TracebackType
from concurrent.futures import Future, ProcessPoolExecutor as _ProcessPoolExecutor
# tblib's pickling_support patches pickle so that tracebacks themselves can be
# (de)serialized
from tblib import pickling_support # type: ignore
pickling_support.install() # type: ignore
T = TypeVar('T')
class ExceptionWithTraceback:
def __init__(self, exc: Exception, tb: TracebackType):
self.exception = exc
self.tb_dump = pickle.dumps(tb)
def __reduce__(self):
def inner(exc: Exception, tb: bytes):
return exc.with_traceback(pickle.loads(tb))
return inner, (self.exception, self.tb_dump)
multiprocess.pool.ExceptionWithTraceback = ExceptionWithTraceback
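# A minimal illustration of the round-trip the patch above enables, assuming
# tblib's pickling_support has been installed (as done at import time):
#
#     try:
#         1 / 0
#     except Exception as exc:
#         wrapped = ExceptionWithTraceback(exc, exc.__traceback__)
#         restored = pickle.loads(pickle.dumps(wrapped))
#
# ``restored`` is the original exception with its traceback re-attached by the
# ``inner`` helper returned from __reduce__.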
# SerializableStack = List[Tuple[int, int]]
# def create_selftracingerror(original: BaseException, traceback: TracebackType):
# stack: SerializableStack = []
# frame = traceback.tb_frame
# stack.append((frame.f_lasti, frame.f_lineno))
# while traceback.tb_next:
# frame = traceback.tb_frame
# stack.append((frame.f_lasti, frame.f_lineno))
# traceback = traceback.tb_next
# return _SelfTracingError(original, stack)
# class _SelfTracingError(Exception):
# def __init__(self, original: BaseException = None, stack: SerializableStack = None):
# super().__init__()
# self.original = original
# self.stack = stack
# def recreate(self):
# traceback = None
# frame = inspect.stack()[0].frame
# for f_lasti, f_lineno in self.stack:
# traceback = TracebackType(
# traceback, frame, f_lasti, f_lineno)
# res = self.original.with_traceback(traceback)
# return res
class ProcessPoolExecutor(_ProcessPoolExecutor):
def __init__(self, max_workers: Optional[int] = None):
        # multiprocess.pool.Pool mirrors the stdlib Pool signature, so the
        # worker count is passed as 'processes'
        kwargs = {'processes': max_workers} if max_workers else {}
self.pool = multiprocess.pool.Pool(**kwargs)
def submit(
self,
fn: Callable[..., T],
*args: Any,
**kwargs: Any
) -> 'Future[T]':
        fut: 'Future[T]' = Future()
        fut.set_running_or_notify_cancel()
        # Propagate results and worker-side exceptions onto the Future so that
        # callers of fut.result() see the original error (with its traceback,
        # courtesy of the ExceptionWithTraceback patch above).
        self.pool.apply_async(  # type: ignore
            fn, args, kwargs, callback=fut.set_result,
            error_callback=fut.set_exception)
        return fut
def map(
self,
fn: Callable[..., T],
*iterables: Any,
timeout: Optional[float] = None,
chunksize: int = 1
) -> Iterator[T]:
        # Mirror concurrent.futures.Executor.map semantics: zip the iterables
        # and apply fn to each resulting argument tuple.
        res: MapResult[T] = \
            self.pool.starmap_async(  # type: ignore
                fn, zip(*iterables), chunksize=chunksize)
return iter(res.get(timeout))
def shutdown(self, wait: bool = True):
self.pool.close()
if wait:
self.pool.join()
self.pool.terminate()
| 30.38835
| 90
| 0.646006
|
70856c6c8bd05e5e38e5399f7b78fb16454093e9
| 1,755
|
py
|
Python
|
package/spack-cityhash/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-cityhash/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-cityhash/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cityhash(AutotoolsPackage):
"""CityHash, a family of hash functions for strings."""
homepage = "https://github.com/google/cityhash"
url = "https://github.com/google/cityhash"
version('2013-07-31', git='https://github.com/google/cityhash.git',
commit='8af9b8c2b889d80c22d6bc26ba0df1afb79a30db')
version('master', branch='master',
git='https://github.com/google/cityhash.git')
def configure_args(self):
return ['--enable-sse4.2']
| 42.804878
| 78
| 0.668376
|
f9a14b11e91a8c32ec0a73bfaa3d47dcc289d61e
| 17,524
|
py
|
Python
|
udemy/decryptor/mp4parse.py
|
vquilon/udemy-dl
|
d8773553d5c644a36db97d82fa62dba4a422c3af
|
[
"MIT"
] | 275
|
2021-05-18T21:28:33.000Z
|
2022-03-30T19:44:28.000Z
|
udemy/decryptor/mp4parse.py
|
vquilon/udemy-dl
|
d8773553d5c644a36db97d82fa62dba4a422c3af
|
[
"MIT"
] | 97
|
2021-05-18T22:44:07.000Z
|
2022-03-26T01:19:59.000Z
|
udemy/decryptor/mp4parse.py
|
vquilon/udemy-dl
|
d8773553d5c644a36db97d82fa62dba4a422c3af
|
[
"MIT"
] | 148
|
2021-05-18T21:28:40.000Z
|
2022-03-31T06:51:27.000Z
|
""" MP4 Parser based on:
http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf
@author: Alastair McCormack
@license: MIT License
"""
import bitstring
from datetime import datetime
from collections import namedtuple
import logging
import six
log = logging.getLogger(__name__)
#log.addHandler(logging.NullHandler())
log.setLevel(logging.WARN)
class MixinDictRepr(object):
def __repr__(self, *args, **kwargs):
return "{class_name} : {content!r} ".format(class_name=self.__class__.__name__,
content=self.__dict__)
class MixinMinimalRepr(object):
""" A minimal representaion when the payload could be large """
def __repr__(self, *args, **kwargs):
return "{class_name} : {content!r} ".format(class_name=self.__class__.__name__,
content=self.__dict__.keys())
class FragmentRunTableBox(MixinDictRepr):
pass
class UnImplementedBox(MixinDictRepr):
type = "na"
pass
class MovieFragmentBox(MixinDictRepr):
type = "moof"
class MovieBox(MixinDictRepr):
type = "moov"
class BootStrapInfoBox(MixinDictRepr):
type = "abst"
@property
def current_media_time(self):
return self._current_media_time
@current_media_time.setter
def current_media_time(self, epoch_timestamp):
""" Takes a timestamp arg and saves it as datetime """
self._current_media_time = datetime.utcfromtimestamp(epoch_timestamp/float(self.time_scale))
class FragmentRandomAccessBox(MixinDictRepr):
""" aka afra """
type = "afra"
FragmentRandomAccessBoxEntry = namedtuple("FragmentRandomAccessBoxEntry", ["time", "offset"])
FragmentRandomAccessBoxGlobalEntry = namedtuple("FragmentRandomAccessBoxGlobalEntry", ["time", "segment_number", "fragment_number", "afra_offset", "sample_offset"])
pass
class SegmentRunTable(MixinDictRepr):
type = "asrt"
SegmentRunTableEntry = namedtuple('SegmentRunTableEntry', ["first_segment", "fragments_per_segment"])
pass
class FragmentRunTable(MixinDictRepr):
type = "afrt"
class FragmentRunTableEntry( namedtuple('FragmentRunTableEntry',
["first_fragment",
"first_fragment_timestamp",
"fragment_duration",
"discontinuity_indicator"]) ):
DI_END_OF_PRESENTATION = 0
DI_NUMBERING = 1
DI_TIMESTAMP = 2
DI_TIMESTAMP_AND_NUMBER = 3
def __eq__(self, other):
if self.first_fragment == other.first_fragment and \
self.first_fragment_timestamp == other.first_fragment_timestamp and \
self.fragment_duration == other.fragment_duration and \
self.discontinuity_indicator == other.discontinuity_indicator:
return True
def __repr__(self, *args, **kwargs):
return str(self.__dict__)
class MediaDataBox(MixinMinimalRepr):
""" aka mdat """
type = "mdat"
class MovieFragmentHeader(MixinDictRepr):
type = "mfhd"
class ProtectionSystemSpecificHeader(MixinDictRepr):
type = "pssh"
BoxHeader = namedtuple( "BoxHeader", ["box_size", "box_type", "header_size"] )
class F4VParser(object):
@classmethod
def parse(cls, filename=None, bytes_input=None, file_input=None, offset_bytes=0, headers_only=False):
"""
Parse an MP4 file or bytes into boxes
:param filename: filename of mp4 file.
:type filename: str.
:param bytes_input: bytes of mp4 file.
:type bytes_input: bytes / Python 2.x str.
:param offset_bytes: start parsing at offset.
:type offset_bytes: int.
:param headers_only: Ignore data and return just headers. Useful when data is cut short
:type: headers_only: boolean
:return: BMFF Boxes or Headers
"""
box_lookup = {
BootStrapInfoBox.type: cls._parse_abst,
FragmentRandomAccessBox.type: cls._parse_afra,
MediaDataBox.type: cls._parse_mdat,
MovieFragmentBox.type: cls._parse_moof,
MovieBox.type: cls._parse_moov,
MovieFragmentHeader.type: cls._parse_mfhd,
ProtectionSystemSpecificHeader.type: cls._parse_pssh
}
if filename:
bs = bitstring.ConstBitStream(filename=filename, offset=offset_bytes * 8)
elif bytes_input:
bs = bitstring.ConstBitStream(bytes=bytes_input, offset=offset_bytes * 8)
else:
bs = bitstring.ConstBitStream(auto=file_input, offset=offset_bytes * 8)
log.debug("Starting parse")
log.debug("Size is %d bits", bs.len)
while bs.pos < bs.len:
log.debug("Byte pos before header: %d relative to (%d)", bs.bytepos, offset_bytes)
log.debug("Reading header")
try:
header = cls._read_box_header(bs)
except bitstring.ReadError as e:
log.error("Premature end of data while reading box header")
raise
log.debug("Header type: %s", header.box_type)
log.debug("Byte pos after header: %d relative to (%d)", bs.bytepos, offset_bytes)
if headers_only:
yield header
# move pointer to next header if possible
try:
bs.bytepos += header.box_size
except ValueError:
log.warning("Premature end of data")
raise
else:
# Get parser method for header type
parse_function = box_lookup.get(header.box_type, cls._parse_unimplemented)
try:
yield parse_function(bs, header)
except ValueError as e:
log.error("Premature end of data")
raise
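    # A minimal usage sketch, assuming a local file named "sample.mp4":
    #
    #     for box in F4VParser.parse(filename="sample.mp4"):
    #         print(box.type, box.header.box_size)
    #
    # With headers_only=True the generator yields BoxHeader namedtuples
    # (box_size, box_type, header_size) instead of parsed box objects.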
@classmethod
def _is_mp4(cls, parser):
try:
for box in parser:
return True
except ValueError:
return False
@classmethod
def is_mp4_s(cls, bytes_input):
""" Is bytes_input the contents of an MP4 file
:param bytes_input: str/bytes to check.
:type bytes_input: str/bytes.
:return:
"""
parser = cls.parse(bytes_input=bytes_input, headers_only=True)
return cls._is_mp4(parser)
@classmethod
def is_mp4(cls, file_input):
""" Checks input if it's an MP4 file
:param input: Filename or file object
:type input: str, file
:param state: Current state to be in.
:type state: bool.
:returns: bool.
:raises: AttributeError, KeyError
"""
if hasattr(file_input, "read"):
parser = cls.parse(file_input=file_input, headers_only=True)
else:
parser = cls.parse(filename=file_input, headers_only=True)
return cls._is_mp4(parser)
@staticmethod
def _read_string(bs):
""" read UTF8 null terminated string """
result = bs.readto('0x00', bytealigned=True).bytes.decode("utf-8")[:-1]
return result if result else None
@classmethod
def _read_count_and_string_table(cls, bs):
""" Read a count then return the strings in a list """
result = []
entry_count = bs.read("uint:8")
        for _ in six.moves.range(0, entry_count):
result.append( cls._read_string(bs) )
return result
@staticmethod
def _read_box_header(bs):
header_start_pos = bs.bytepos
size, box_type = bs.readlist("uint:32, bytes:4")
        # box_type should be an ASCII string; try to decode it as UTF-8
try:
box_type = box_type.decode('utf-8')
except UnicodeDecodeError:
# we'll leave as bytes instead
pass
# if size == 1, then this is an extended size type.
# Therefore read the next 64 bits as size
if size == 1:
size = bs.read("uint:64")
header_end_pos = bs.bytepos
header_size = header_end_pos - header_start_pos
return BoxHeader(box_size=size-header_size, box_type=box_type, header_size=header_size)
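    # For example, a 32-bit size field of 0x20 followed by the bytes b'moov'
    # yields box_type 'moov', header_size 8 and box_size 0x20 - 8 = 24, i.e.
    # box_size as returned here counts only the payload after the header.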
@staticmethod
def _parse_unimplemented(bs, header):
ui = UnImplementedBox()
ui.header = header
bs.bytepos += header.box_size
return ui
@classmethod
def _parse_afra(cls, bs, header):
afra = FragmentRandomAccessBox()
afra.header = header
# read the entire box in case there's padding
afra_bs = bs.read(header.box_size * 8)
# skip Version and Flags
afra_bs.pos += 8 + 24
long_ids, long_offsets, global_entries, afra.time_scale, local_entry_count = \
afra_bs.readlist("bool, bool, bool, pad:5, uint:32, uint:32")
if long_ids:
id_bs_type = "uint:32"
else:
id_bs_type = "uint:16"
if long_offsets:
offset_bs_type = "uint:64"
else:
offset_bs_type = "uint:32"
log.debug("local_access_entries entry count: %s", local_entry_count)
afra.local_access_entries = []
        for _ in six.moves.range(0, local_entry_count):
time = cls._parse_time_field(afra_bs, afra.time_scale)
offset = afra_bs.read(offset_bs_type)
afra_entry = \
FragmentRandomAccessBox.FragmentRandomAccessBoxEntry(time=time,
offset=offset)
afra.local_access_entries.append(afra_entry)
afra.global_access_entries = []
if global_entries:
global_entry_count = afra_bs.read("uint:32")
log.debug("global_access_entries entry count: %s", global_entry_count)
            for _ in six.moves.range(0, global_entry_count):
time = cls._parse_time_field(afra_bs, afra.time_scale)
segment_number = afra_bs.read(id_bs_type)
fragment_number = afra_bs.read(id_bs_type)
afra_offset = afra_bs.read(offset_bs_type)
sample_offset = afra_bs.read(offset_bs_type)
afra_global_entry = \
FragmentRandomAccessBox.FragmentRandomAccessBoxGlobalEntry(
time=time,
segment_number=segment_number,
fragment_number=fragment_number,
afra_offset=afra_offset,
sample_offset=sample_offset)
afra.global_access_entries.append(afra_global_entry)
return afra
@classmethod
def _parse_moof(cls, bootstrap_bs, header):
moof = MovieFragmentBox()
moof.header = header
box_bs = bootstrap_bs.read(moof.header.box_size * 8)
for child_box in cls.parse(bytes_input=box_bs.bytes):
setattr(moof, child_box.type, child_box)
return moof
@classmethod
def _parse_moov(cls, bootstrap_bs, header):
moov = MovieBox()
moov.header = header
psshs = []
box_bs = bootstrap_bs.read(moov.header.box_size * 8)
for child_box in cls.parse(bytes_input=box_bs.bytes):
if(child_box.type == "pssh"):
psshs.append(child_box)
else:
setattr(moov, child_box.type, child_box)
setattr(moov, "pssh", psshs)
return moov
@classmethod
def _parse_mfhd(cls, bootstrap_bs, header):
mfhd = MovieFragmentHeader()
mfhd.header = header
box_bs = bootstrap_bs.read(mfhd.header.box_size * 8)
return mfhd
@staticmethod
def _parse_pssh(bootstrap_bs, header):
pssh = ProtectionSystemSpecificHeader()
pssh.header = header
box_bs = bootstrap_bs.read(pssh.header.box_size*8)
# Payload appears to be 8 bytes in.
data = box_bs.hex[8:]
pssh.system_id = data[:32]
pssh.payload = data[40:]
return pssh
@classmethod
def _parse_abst(cls, bootstrap_bs, header):
abst = BootStrapInfoBox()
abst.header = header
box_bs = bootstrap_bs.read(abst.header.box_size * 8)
abst.version, abst.profile_raw, abst.live, abst.update, \
abst.time_scale, abst.current_media_time, abst.smpte_timecode_offset = \
box_bs.readlist("""pad:8, pad:24, uint:32, uint:2, bool, bool,
pad:4,
uint:32, uint:64, uint:64""")
abst.movie_identifier = cls._read_string(box_bs)
abst.server_entry_table = cls._read_count_and_string_table(box_bs)
abst.quality_entry_table = cls._read_count_and_string_table(box_bs)
abst.drm_data = cls._read_string(box_bs)
abst.meta_data = cls._read_string(box_bs)
abst.segment_run_tables = []
segment_count = box_bs.read("uint:8")
log.debug("segment_count: %d" % segment_count)
        for _ in six.moves.range(0, segment_count):
abst.segment_run_tables.append( cls._parse_asrt(box_bs) )
abst.fragment_tables = []
fragment_count = box_bs.read("uint:8")
log.debug("fragment_count: %d" % fragment_count)
        for _ in six.moves.range(0, fragment_count):
abst.fragment_tables.append( cls._parse_afrt(box_bs) )
log.debug("Finished parsing abst")
return abst
@classmethod
def _parse_asrt(cls, box_bs):
""" Parse asrt / Segment Run Table Box """
asrt = SegmentRunTable()
asrt.header = cls._read_box_header(box_bs)
# read the entire box in case there's padding
asrt_bs_box = box_bs.read(asrt.header.box_size * 8)
asrt_bs_box.pos += 8
update_flag = asrt_bs_box.read("uint:24")
asrt.update = True if update_flag == 1 else False
asrt.quality_segment_url_modifiers = cls._read_count_and_string_table(asrt_bs_box)
asrt.segment_run_table_entries = []
segment_count = asrt_bs_box.read("uint:32")
        for _ in six.moves.range(0, segment_count):
first_segment = asrt_bs_box.read("uint:32")
fragments_per_segment = asrt_bs_box.read("uint:32")
asrt.segment_run_table_entries.append(
SegmentRunTable.SegmentRunTableEntry(first_segment=first_segment,
fragments_per_segment=fragments_per_segment) )
return asrt
@classmethod
def _parse_afrt(cls, box_bs):
""" Parse afrt / Fragment Run Table Box """
afrt = FragmentRunTable()
afrt.header = cls._read_box_header(box_bs)
# read the entire box in case there's padding
afrt_bs_box = box_bs.read(afrt.header.box_size * 8)
afrt_bs_box.pos += 8
update_flag = afrt_bs_box.read("uint:24")
afrt.update = True if update_flag == 1 else False
afrt.time_scale = afrt_bs_box.read("uint:32")
afrt.quality_fragment_url_modifiers = cls._read_count_and_string_table(afrt_bs_box)
fragment_count = afrt_bs_box.read("uint:32")
afrt.fragments = []
        for _ in six.moves.range(0, fragment_count):
first_fragment = afrt_bs_box.read("uint:32")
first_fragment_timestamp_raw = afrt_bs_box.read("uint:64")
try:
first_fragment_timestamp = datetime.utcfromtimestamp(first_fragment_timestamp_raw/float(afrt.time_scale))
except ValueError:
# Elemental sometimes create odd timestamps
first_fragment_timestamp = None
fragment_duration = afrt_bs_box.read("uint:32")
if fragment_duration == 0:
discontinuity_indicator = afrt_bs_box.read("uint:8")
else:
discontinuity_indicator = None
frte = FragmentRunTable.FragmentRunTableEntry(first_fragment=first_fragment,
first_fragment_timestamp=first_fragment_timestamp,
fragment_duration=fragment_duration,
discontinuity_indicator=discontinuity_indicator)
afrt.fragments.append(frte)
return afrt
@staticmethod
def _parse_mdat(box_bs, header):
""" Parse afrt / Fragment Run Table Box """
mdat = MediaDataBox()
mdat.header = header
mdat.payload = box_bs.read(mdat.header.box_size * 8).bytes
return mdat
@staticmethod
def _parse_time_field(bs, scale):
timestamp = bs.read("uint:64")
return datetime.utcfromtimestamp(timestamp / float(scale) )
| 35.259557
| 168
| 0.583942
|
21a453b7c9ab43daf8deb3817dbfe0adb616c829
| 25,062
|
py
|
Python
|
neural_clbf/controllers/neural_clbf_controller.py
|
MIT-REALM/neural_clbf
|
5eda47941aabc6cb4147c618c9fc5b58e1591d67
|
[
"BSD-3-Clause"
] | 9
|
2022-01-22T11:47:11.000Z
|
2022-03-08T14:49:38.000Z
|
neural_clbf/controllers/neural_clbf_controller.py
|
MIT-REALM/neural_clbf
|
5eda47941aabc6cb4147c618c9fc5b58e1591d67
|
[
"BSD-3-Clause"
] | 1
|
2021-11-14T22:30:20.000Z
|
2021-11-19T14:40:49.000Z
|
neural_clbf/controllers/neural_clbf_controller.py
|
MIT-REALM/neural_clbf
|
5eda47941aabc6cb4147c618c9fc5b58e1591d67
|
[
"BSD-3-Clause"
] | 5
|
2022-01-23T17:02:52.000Z
|
2022-03-29T22:26:59.000Z
|
import itertools
from typing import Tuple, List, Optional
from collections import OrderedDict
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from neural_clbf.systems import ControlAffineSystem
from neural_clbf.systems.utils import ScenarioList
from neural_clbf.controllers.clf_controller import CLFController
from neural_clbf.controllers.controller_utils import normalize_with_angles
from neural_clbf.datamodules.episodic_datamodule import EpisodicDataModule
from neural_clbf.experiments import ExperimentSuite
class NeuralCLBFController(pl.LightningModule, CLFController):
"""
A neural rCLBF controller. Differs from the CLFController in that it uses a
neural network to learn the CLF, and it turns it from a CLF to a CLBF by making sure
that a level set of the CLF separates the safe and unsafe regions.
More specifically, the CLBF controller looks for a V such that
V(goal) = 0
V >= 0
V(safe) < c
V(unsafe) > c
dV/dt <= -lambda V
This proves forward invariance of the c-sublevel set of V, and since the safe set is
a subset of this sublevel set, we prove that the unsafe region is not reachable from
the safe region. We also prove convergence to a point.
"""
def __init__(
self,
dynamics_model: ControlAffineSystem,
scenarios: ScenarioList,
datamodule: EpisodicDataModule,
experiment_suite: ExperimentSuite,
clbf_hidden_layers: int = 2,
clbf_hidden_size: int = 48,
clf_lambda: float = 1.0,
safe_level: float = 1.0,
clf_relaxation_penalty: float = 50.0,
controller_period: float = 0.01,
primal_learning_rate: float = 1e-3,
epochs_per_episode: int = 5,
penalty_scheduling_rate: float = 0.0,
num_init_epochs: int = 5,
barrier: bool = True,
add_nominal: bool = False,
normalize_V_nominal: bool = False,
):
"""Initialize the controller.
args:
dynamics_model: the control-affine dynamics of the underlying system
scenarios: a list of parameter scenarios to train on
experiment_suite: defines the experiments to run during training
clbf_hidden_layers: number of hidden layers to use for the CLBF network
clbf_hidden_size: number of neurons per hidden layer in the CLBF network
clf_lambda: convergence rate for the CLBF
safe_level: safety level set value for the CLBF
clf_relaxation_penalty: the penalty for relaxing CLBF conditions.
controller_period: the timestep to use in simulating forward Vdot
primal_learning_rate: the learning rate for SGD for the network weights,
applied to the CLBF decrease loss
epochs_per_episode: the number of epochs to include in each episode
penalty_scheduling_rate: the rate at which to ramp the rollout relaxation
penalty up to clf_relaxation_penalty. Set to 0 to
disable penalty scheduling (use constant penalty)
num_init_epochs: the number of epochs to pretrain the controller on the
linear controller
barrier: if True, train the CLBF to act as a barrier functions. If false,
effectively trains only a CLF.
add_nominal: if True, add the nominal V
normalize_V_nominal: if True, normalize V_nominal so that its average is 1
"""
super(NeuralCLBFController, self).__init__(
dynamics_model=dynamics_model,
scenarios=scenarios,
experiment_suite=experiment_suite,
clf_lambda=clf_lambda,
clf_relaxation_penalty=clf_relaxation_penalty,
controller_period=controller_period,
)
self.save_hyperparameters()
# Save the provided model
# self.dynamics_model = dynamics_model
self.scenarios = scenarios
self.n_scenarios = len(scenarios)
# Save the datamodule
self.datamodule = datamodule
        # Save the experiment suite
self.experiment_suite = experiment_suite
# Save the other parameters
self.safe_level = safe_level
self.unsafe_level = safe_level
self.primal_learning_rate = primal_learning_rate
self.epochs_per_episode = epochs_per_episode
self.penalty_scheduling_rate = penalty_scheduling_rate
self.num_init_epochs = num_init_epochs
self.barrier = barrier
self.add_nominal = add_nominal
self.normalize_V_nominal = normalize_V_nominal
self.V_nominal_mean = 1.0
# Compute and save the center and range of the state variables
x_max, x_min = dynamics_model.state_limits
self.x_center = (x_max + x_min) / 2.0
self.x_range = (x_max - x_min) / 2.0
# Scale to get the input between (-k, k), centered at 0
self.k = 1.0
self.x_range = self.x_range / self.k
# We shouldn't scale or offset any angle dimensions
self.x_center[self.dynamics_model.angle_dims] = 0.0
self.x_range[self.dynamics_model.angle_dims] = 1.0
# Some of the dimensions might represent angles. We want to replace these
# dimensions with two dimensions: sin and cos of the angle. To do this, we need
# to figure out how many numbers are in the expanded state
n_angles = len(self.dynamics_model.angle_dims)
self.n_dims_extended = self.dynamics_model.n_dims + n_angles
# Define the CLBF network, which we denote V
self.clbf_hidden_layers = clbf_hidden_layers
self.clbf_hidden_size = clbf_hidden_size
# We're going to build the network up layer by layer, starting with the input
self.V_layers: OrderedDict[str, nn.Module] = OrderedDict()
self.V_layers["input_linear"] = nn.Linear(
self.n_dims_extended, self.clbf_hidden_size
)
self.V_layers["input_activation"] = nn.Tanh()
for i in range(self.clbf_hidden_layers):
self.V_layers[f"layer_{i}_linear"] = nn.Linear(
self.clbf_hidden_size, self.clbf_hidden_size
)
if i < self.clbf_hidden_layers - 1:
self.V_layers[f"layer_{i}_activation"] = nn.Tanh()
# self.V_layers["output_linear"] = nn.Linear(self.clbf_hidden_size, 1)
self.V_nn = nn.Sequential(self.V_layers)
def prepare_data(self):
return self.datamodule.prepare_data()
def setup(self, stage: Optional[str] = None):
return self.datamodule.setup(stage)
def train_dataloader(self):
return self.datamodule.train_dataloader()
def val_dataloader(self):
return self.datamodule.val_dataloader()
def test_dataloader(self):
return self.datamodule.test_dataloader()
def V_with_jacobian(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Computes the CLBF value and its Jacobian
args:
x: bs x self.dynamics_model.n_dims the points at which to evaluate the CLBF
returns:
V: bs tensor of CLBF values
JV: bs x 1 x self.dynamics_model.n_dims Jacobian of each row of V wrt x
"""
# Apply the offset and range to normalize about zero
x_norm = normalize_with_angles(self.dynamics_model, x)
# Compute the CLBF layer-by-layer, computing the Jacobian alongside
# We need to initialize the Jacobian to reflect the normalization that's already
# been done to x
bs = x_norm.shape[0]
JV = torch.zeros(
(bs, self.n_dims_extended, self.dynamics_model.n_dims)
).type_as(x)
# and for each non-angle dimension, we need to scale by the normalization
for dim in range(self.dynamics_model.n_dims):
JV[:, dim, dim] = 1.0 / self.x_range[dim].type_as(x)
# And adjust the Jacobian for the angle dimensions
for offset, sin_idx in enumerate(self.dynamics_model.angle_dims):
cos_idx = self.dynamics_model.n_dims + offset
JV[:, sin_idx, sin_idx] = x_norm[:, cos_idx]
JV[:, cos_idx, sin_idx] = -x_norm[:, sin_idx]
# Now step through each layer in V
V = x_norm
for layer in self.V_nn:
V = layer(V)
if isinstance(layer, nn.Linear):
JV = torch.matmul(layer.weight, JV)
elif isinstance(layer, nn.Tanh):
JV = torch.matmul(torch.diag_embed(1 - V ** 2), JV)
elif isinstance(layer, nn.ReLU):
JV = torch.matmul(torch.diag_embed(torch.sign(V)), JV)
# Compute the final activation
JV = torch.bmm(V.unsqueeze(1), JV)
V = 0.5 * (V * V).sum(dim=1)
if self.add_nominal:
# Get the nominal Lyapunov function
P = self.dynamics_model.P.type_as(x)
x0 = self.dynamics_model.goal_point.type_as(x)
# Reshape to use pytorch's bilinear function
P = P.reshape(1, self.dynamics_model.n_dims, self.dynamics_model.n_dims)
V_nominal = 0.5 * F.bilinear(x - x0, x - x0, P).squeeze()
# Reshape again to calculate the gradient
P = P.reshape(self.dynamics_model.n_dims, self.dynamics_model.n_dims)
JV_nominal = F.linear(x - x0, P)
JV_nominal = JV_nominal.reshape(x.shape[0], 1, self.dynamics_model.n_dims)
if self.normalize_V_nominal:
V_nominal /= self.V_nominal_mean
JV_nominal /= self.V_nominal_mean
V = V + V_nominal
JV = JV + JV_nominal
return V, JV
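    # A minimal sanity check for the analytic Jacobian, assuming a controller
    # instance and a small batch:
    #
    #     x = torch.randn(4, controller.dynamics_model.n_dims, requires_grad=True)
    #     V, JV = controller.V_with_jacobian(x)
    #     JV_auto, = torch.autograd.grad(V.sum(), x)
    #
    # Since each V[i] depends only on x[i], JV_auto should match JV.squeeze(1)
    # up to numerical error.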
def forward(self, x):
"""Determine the control input for a given state using a QP
args:
x: bs x self.dynamics_model.n_dims tensor of state
returns:
u: bs x self.dynamics_model.n_controls tensor of control inputs
"""
return self.u(x)
def boundary_loss(
self,
x: torch.Tensor,
goal_mask: torch.Tensor,
safe_mask: torch.Tensor,
unsafe_mask: torch.Tensor,
accuracy: bool = False,
) -> List[Tuple[str, torch.Tensor]]:
"""
Evaluate the loss on the CLBF due to boundary conditions
args:
x: the points at which to evaluate the loss,
goal_mask: the points in x marked as part of the goal
safe_mask: the points in x marked safe
unsafe_mask: the points in x marked unsafe
accuracy: if True, return the accuracy (from 0 to 1) as well as the losses
returns:
loss: a list of tuples containing ("category_name", loss_value).
"""
eps = 1e-2
# Compute loss to encourage satisfaction of the following conditions...
loss = []
V = self.V(x)
# 1.) CLBF should be minimized on the goal point
V_goal_pt = self.V(self.dynamics_model.goal_point.type_as(x))
goal_term = 1e1 * V_goal_pt.mean()
loss.append(("CLBF goal term", goal_term))
# Only train these terms if we have a barrier requirement
if self.barrier:
# 2.) 0 < V <= safe_level in the safe region
V_safe = V[safe_mask]
safe_violation = F.relu(eps + V_safe - self.safe_level)
safe_V_term = 1e2 * safe_violation.mean()
loss.append(("CLBF safe region term", safe_V_term))
if accuracy:
safe_V_acc = (safe_violation <= eps).sum() / safe_violation.nelement()
loss.append(("CLBF safe region accuracy", safe_V_acc))
# 3.) V >= unsafe_level in the unsafe region
V_unsafe = V[unsafe_mask]
unsafe_violation = F.relu(eps + self.unsafe_level - V_unsafe)
unsafe_V_term = 1e2 * unsafe_violation.mean()
loss.append(("CLBF unsafe region term", unsafe_V_term))
if accuracy:
unsafe_V_acc = (
unsafe_violation <= eps
).sum() / unsafe_violation.nelement()
loss.append(("CLBF unsafe region accuracy", unsafe_V_acc))
return loss
def descent_loss(
self,
x: torch.Tensor,
goal_mask: torch.Tensor,
safe_mask: torch.Tensor,
unsafe_mask: torch.Tensor,
accuracy: bool = False,
requires_grad: bool = False,
) -> List[Tuple[str, torch.Tensor]]:
"""
Evaluate the loss on the CLBF due to the descent condition
args:
x: the points at which to evaluate the loss,
goal_mask: the points in x marked as part of the goal
safe_mask: the points in x marked safe
unsafe_mask: the points in x marked unsafe
accuracy: if True, return the accuracy (from 0 to 1) as well as the losses
requires_grad: if True, use a differentiable QP solver
returns:
loss: a list of tuples containing ("category_name", loss_value).
"""
# Compute loss to encourage satisfaction of the following conditions...
loss = []
# The CLBF decrease condition requires that V is decreasing everywhere where
# V <= safe_level. We'll encourage this in three ways:
#
# 1) Minimize the relaxation needed to make the QP feasible.
# 2) Compute the CLBF decrease at each point by linearizing
# 3) Compute the CLBF decrease at each point by simulating
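        # Concretely, the linearized check below forms Vdot = Lf_V + Lg_V @ u
        # per scenario and penalizes violations of Vdot + clf_lambda * V <= 0
        # through a ReLU hinge with margin eps.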
# First figure out where this condition needs to hold
eps = 0.1
V = self.V(x)
if self.barrier:
condition_active = torch.sigmoid(10 * (self.safe_level + eps - V))
else:
condition_active = torch.tensor(1.0)
# Get the control input and relaxation from solving the QP, and aggregate
# the relaxation across scenarios
u_qp, qp_relaxation = self.solve_CLF_QP(x, requires_grad=requires_grad)
qp_relaxation = torch.mean(qp_relaxation, dim=-1)
# Minimize the qp relaxation to encourage satisfying the decrease condition
qp_relaxation_loss = (qp_relaxation * condition_active).mean()
loss.append(("QP relaxation", qp_relaxation_loss))
# Now compute the decrease using linearization
eps = 1.0
clbf_descent_term_lin = torch.tensor(0.0).type_as(x)
clbf_descent_acc_lin = torch.tensor(0.0).type_as(x)
# Get the current value of the CLBF and its Lie derivatives
Lf_V, Lg_V = self.V_lie_derivatives(x)
for i, s in enumerate(self.scenarios):
# Use the dynamics to compute the derivative of V
Vdot = Lf_V[:, i, :].unsqueeze(1) + torch.bmm(
Lg_V[:, i, :].unsqueeze(1),
u_qp.reshape(-1, self.dynamics_model.n_controls, 1),
)
Vdot = Vdot.reshape(V.shape)
violation = F.relu(eps + Vdot + self.clf_lambda * V)
violation *= condition_active
clbf_descent_term_lin += violation.mean()
clbf_descent_acc_lin += (violation <= eps).sum() / (
violation.nelement() * self.n_scenarios
)
loss.append(("CLBF descent term (linearized)", clbf_descent_term_lin))
if accuracy:
loss.append(("CLBF descent accuracy (linearized)", clbf_descent_acc_lin))
# Now compute the decrease using simulation
eps = 1.0
clbf_descent_term_sim = torch.tensor(0.0).type_as(x)
clbf_descent_acc_sim = torch.tensor(0.0).type_as(x)
for s in self.scenarios:
xdot = self.dynamics_model.closed_loop_dynamics(x, u_qp, params=s)
x_next = x + self.dynamics_model.dt * xdot
V_next = self.V(x_next)
violation = F.relu(
eps + (V_next - V) / self.controller_period + self.clf_lambda * V
)
violation *= condition_active
clbf_descent_term_sim += violation.mean()
clbf_descent_acc_sim += (violation <= eps).sum() / (
violation.nelement() * self.n_scenarios
)
loss.append(("CLBF descent term (simulated)", clbf_descent_term_sim))
if accuracy:
loss.append(("CLBF descent accuracy (simulated)", clbf_descent_acc_sim))
return loss
def initial_loss(self, x: torch.Tensor) -> List[Tuple[str, torch.Tensor]]:
"""
Compute the loss during the initialization epochs, which trains the net to
match the local linear lyapunov function
"""
loss = []
# The initial losses should decrease exponentially to zero, based on the epoch
epoch_count = max(self.current_epoch - self.num_init_epochs, 0)
decrease_factor = 0.8 ** epoch_count
# 1.) Compare the CLBF to the nominal solution
# Get the learned CLBF
V = self.V(x)
# Get the nominal Lyapunov function
P = self.dynamics_model.P.type_as(x)
x0 = self.dynamics_model.goal_point.type_as(x)
# Reshape to use pytorch's bilinear function
P = P.reshape(1, self.dynamics_model.n_dims, self.dynamics_model.n_dims)
V_nominal = 0.5 * F.bilinear(x - x0, x - x0, P).squeeze()
if self.normalize_V_nominal:
self.V_nominal_mean = V_nominal.mean()
V_nominal /= self.V_nominal_mean
# Compute the error between the two
clbf_mse_loss = (V - V_nominal) ** 2
clbf_mse_loss = decrease_factor * clbf_mse_loss.mean()
loss.append(("CLBF MSE", clbf_mse_loss))
return loss
def training_step(self, batch, batch_idx):
"""Conduct the training step for the given batch"""
# Extract the input and masks from the batch
x, goal_mask, safe_mask, unsafe_mask = batch
# Compute the losses
component_losses = {}
component_losses.update(self.initial_loss(x))
component_losses.update(
self.boundary_loss(x, goal_mask, safe_mask, unsafe_mask)
)
component_losses.update(
self.descent_loss(x, goal_mask, safe_mask, unsafe_mask, requires_grad=True)
)
# Compute the overall loss by summing up the individual losses
total_loss = torch.tensor(0.0).type_as(x)
# For the objectives, we can just sum them
for _, loss_value in component_losses.items():
if not torch.isnan(loss_value):
total_loss += loss_value
batch_dict = {"loss": total_loss, **component_losses}
return batch_dict
def training_epoch_end(self, outputs):
"""This function is called after every epoch is completed."""
# Outputs contains a list for each optimizer, and we need to collect the losses
# from all of them if there is a nested list
if isinstance(outputs[0], list):
outputs = itertools.chain(*outputs)
# Gather up all of the losses for each component from all batches
losses = {}
for batch_output in outputs:
for key in batch_output.keys():
# if we've seen this key before, add this component loss to the list
if key in losses:
losses[key].append(batch_output[key])
else:
# otherwise, make a new list
losses[key] = [batch_output[key]]
# Average all the losses
avg_losses = {}
for key in losses.keys():
key_losses = torch.stack(losses[key])
avg_losses[key] = torch.nansum(key_losses) / key_losses.shape[0]
# Log the overall loss...
self.log("Total loss / train", avg_losses["loss"], sync_dist=True)
# And all component losses
for loss_key in avg_losses.keys():
# We already logged overall loss, so skip that here
if loss_key == "loss":
continue
# Log the other losses
self.log(loss_key + " / train", avg_losses[loss_key], sync_dist=True)
def validation_step(self, batch, batch_idx):
"""Conduct the validation step for the given batch"""
# Extract the input and masks from the batch
x, goal_mask, safe_mask, unsafe_mask = batch
# Get the various losses
component_losses = {}
component_losses.update(
self.boundary_loss(x, goal_mask, safe_mask, unsafe_mask)
)
component_losses.update(self.descent_loss(x, goal_mask, safe_mask, unsafe_mask))
# Compute the overall loss by summing up the individual losses
total_loss = torch.tensor(0.0).type_as(x)
# For the objectives, we can just sum them
for _, loss_value in component_losses.items():
if not torch.isnan(loss_value):
total_loss += loss_value
# Also compute the accuracy associated with each loss
component_losses.update(
self.boundary_loss(x, goal_mask, safe_mask, unsafe_mask, accuracy=True)
)
component_losses.update(
self.descent_loss(x, goal_mask, safe_mask, unsafe_mask, accuracy=True)
)
batch_dict = {"val_loss": total_loss, **component_losses}
return batch_dict
def validation_epoch_end(self, outputs):
"""This function is called after every epoch is completed."""
# Gather up all of the losses for each component from all batches
losses = {}
for batch_output in outputs:
for key in batch_output.keys():
# if we've seen this key before, add this component loss to the list
if key in losses:
losses[key].append(batch_output[key])
else:
# otherwise, make a new list
losses[key] = [batch_output[key]]
# Average all the losses
avg_losses = {}
for key in losses.keys():
key_losses = torch.stack(losses[key])
avg_losses[key] = torch.nansum(key_losses) / key_losses.shape[0]
# Log the overall loss...
self.log("Total loss / val", avg_losses["val_loss"], sync_dist=True)
# And all component losses
for loss_key in avg_losses.keys():
# We already logged overall loss, so skip that here
if loss_key == "val_loss":
continue
# Log the other losses
self.log(loss_key + " / val", avg_losses[loss_key], sync_dist=True)
# **Now entering spicetacular automation zone**
# We automatically run experiments every few epochs
# Only plot every 5 epochs
if self.current_epoch % 5 != 0:
return
self.experiment_suite.run_all_and_log_plots(
self, self.logger, self.current_epoch
)
@pl.core.decorators.auto_move_data
def simulator_fn(
self,
x_init: torch.Tensor,
num_steps: int,
relaxation_penalty: Optional[float] = None,
):
# Choose parameters randomly
random_scenario = {}
for param_name in self.scenarios[0].keys():
param_max = max([s[param_name] for s in self.scenarios])
param_min = min([s[param_name] for s in self.scenarios])
random_scenario[param_name] = random.uniform(param_min, param_max)
return self.dynamics_model.simulate(
x_init,
num_steps,
self.u,
guard=self.dynamics_model.out_of_bounds_mask,
controller_period=self.controller_period,
params=random_scenario,
)
def on_validation_epoch_end(self):
"""This function is called at the end of every validation epoch"""
# We want to generate new data at the end of every episode
if self.current_epoch > 0 and self.current_epoch % self.epochs_per_episode == 0:
if self.penalty_scheduling_rate > 0:
relaxation_penalty = (
self.clf_relaxation_penalty
* self.current_epoch
/ self.penalty_scheduling_rate
)
else:
relaxation_penalty = self.clf_relaxation_penalty
            # Use the model's simulation function with this controller
def simulator_fn_wrapper(x_init: torch.Tensor, num_steps: int):
return self.simulator_fn(
x_init,
num_steps,
relaxation_penalty=relaxation_penalty,
)
self.datamodule.add_data(simulator_fn_wrapper)
def configure_optimizers(self):
clbf_params = list(self.V_nn.parameters())
clbf_opt = torch.optim.SGD(
clbf_params,
lr=self.primal_learning_rate,
weight_decay=1e-6,
)
self.opt_idx_dict = {0: "clbf"}
return [clbf_opt]
| 40.685065
| 88
| 0.619025
|
e3181a7e3801f617d5b75b5bea5760dee968796e
| 24,505
|
py
|
Python
|
tests/integration/features/steps/integration_steps.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/features/steps/integration_steps.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/features/steps/integration_steps.py
|
phanirajl/cassandra-medusa
|
04315068365fc372b6a26d8b0ed6d2b135db1d98
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Spotify AB. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cassandra
import configparser
import datetime
import json
import logging
import os
import shutil
import subprocess
import time
from behave import given, when, then
from pathlib import Path
from subprocess import PIPE
import signal
from cassandra.cluster import Cluster
import medusa.backup
import medusa.index
import medusa.listing
import medusa.purge
import medusa.report_latest
import medusa.restore_node
import medusa.status
import medusa.verify
from medusa.config import (
MedusaConfig,
StorageConfig,
CassandraConfig,
MonitoringConfig,
ChecksConfig,
)
from medusa.config import _namedtuple_from_dict
from medusa.storage import Storage
from medusa.monitoring import LocalMonitoring
def kill_cassandra():
p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if b"org.apache.cassandra.service.CassandraDaemon" in line:
logging.info(line)
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
def cleanup_monitoring(context):
LocalMonitoring(context.medusa_config.monitoring).truncate_metric_file()
def cleanup_storage(context, storage_provider):
if storage_provider == "local":
if os.path.isdir(os.path.join("/tmp", "medusa_it_bucket")):
shutil.rmtree(os.path.join("/tmp", "medusa_it_bucket"))
os.makedirs(os.path.join("/tmp", "medusa_it_bucket"))
elif storage_provider == "google_storage" or storage_provider.find("s3") == 0:
storage = Storage(config=context.medusa_config.storage)
objects = storage.storage_driver.list_objects()
for obj in objects:
storage.storage_driver.delete_object(obj)
@given(r'I have a fresh ccm cluster running named "{cluster_name}"')
def _i_have_a_fresh_ccm_cluster_running(context, cluster_name):
context.cassandra_version = "2.2.14"
context.session = None
context.cluster_name = cluster_name
subprocess.run(["ccm", "stop"], stdout=PIPE, stderr=PIPE)
kill_cassandra()
res = subprocess.run(
["ccm", "switch", context.cluster_name], stdout=PIPE, stderr=PIPE
)
if b"does not appear to be a valid cluster" not in res.stderr:
subprocess.check_call(
["ccm", "remove", context.cluster_name], stdout=PIPE, stderr=PIPE
)
subprocess.check_call(
[
"ccm",
"create",
context.cluster_name,
"-v",
"binary:" + context.cassandra_version,
"-n",
"1",
]
)
os.popen("ccm node1 updateconf 'storage_port: 7011'").read()
if os.uname().sysname == "Linux":
os.popen(
"""sed -i 's/#MAX_HEAP_SIZE="4G"/MAX_HEAP_SIZE="256m"/' ~/.ccm/"""
+ context.cluster_name
+ """/node1/conf/cassandra-env.sh"""
).read()
os.popen(
"""sed -i 's/#HEAP_NEWSIZE="800M"/HEAP_NEWSIZE="200M"/' ~/.ccm/"""
+ context.cluster_name
+ """/node1/conf/cassandra-env.sh"""
).read()
os.popen("LOCAL_JMX=yes ccm start --no-wait").read()
context.session = connect_cassandra()
@given(r'I am using "{storage_provider}" as storage provider')
def i_am_using_storage_provider(context, storage_provider):
logging.info("Starting the tests")
if not hasattr(context, "cluster_name"):
context.cluster_name = "test"
config = configparser.ConfigParser(interpolation=None)
if storage_provider == "local":
if os.path.isdir(os.path.join("/tmp", "medusa_it_bucket")):
shutil.rmtree(os.path.join("/tmp", "medusa_it_bucket"))
os.makedirs(os.path.join("/tmp", "medusa_it_bucket"))
config["storage"] = {
"host_file_separator": ",",
"bucket_name": "medusa_it_bucket",
"key_file": "",
"storage_provider": "local",
"fqdn": "localhost",
"api_key_or_username": "",
"api_secret_or_password": "",
"base_path": "/tmp",
}
elif storage_provider == "google_storage":
config["storage"] = {
"host_file_separator": ",",
"bucket_name": "medusa_it_bucket",
"key_file": "~/medusa_credentials.json",
"storage_provider": "google_storage",
"fqdn": "localhost",
"api_key_or_username": "",
"api_secret_or_password": "",
"base_path": "/tmp",
}
elif storage_provider.startswith("s3"):
config["storage"] = {
"host_file_separator": ",",
"bucket_name": "tlp-medusa-dev",
"key_file": "~/.aws/credentials",
"storage_provider": storage_provider,
"fqdn": "localhost",
"api_key_or_username": "",
"api_secret_or_password": "",
"api_profile": "default",
"base_path": "/tmp",
"multi_part_upload_threshold": 1 * 1024,
"concurrent_transfers": 4
}
config["cassandra"] = {
"is_ccm": 1,
"stop_cmd": "ccm stop",
"start_cmd": "ccm start",
"cql_username": "cassandra",
"cql_password": "cassandra",
"config_file": os.path.expanduser(
os.path.join(
"~/.ccm", context.cluster_name, "node1", "conf", "cassandra.yaml"
)
),
"sstableloader_bin": os.path.expanduser(
os.path.join(
"~/.ccm",
"repository",
context.cassandra_version,
"bin",
"sstableloader",
)
),
}
config["monitoring"] = {"monitoring_provider": "local"}
context.medusa_config = MedusaConfig(
storage=_namedtuple_from_dict(StorageConfig, config["storage"]),
cassandra=_namedtuple_from_dict(CassandraConfig, config["cassandra"]),
monitoring=_namedtuple_from_dict(MonitoringConfig, config["monitoring"]),
ssh=None,
restore=None,
)
cleanup_storage(context, storage_provider)
cleanup_monitoring(context)
@when(r'I create the "{table_name}" table in keyspace "{keyspace_name}"')
def _i_create_the_whatever_table(context, table_name, keyspace_name):
keyspace = """CREATE KEYSPACE IF NOT EXISTS {} WITH replication = {{'class':'SimpleStrategy',
'replication_factor':1}}"""
context.session.execute(keyspace.format(keyspace_name))
table = "CREATE TABLE IF NOT EXISTS {}.{} (id timeuuid PRIMARY KEY, value text);"
context.session.execute(table.format(keyspace_name, table_name))
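# A minimal example of the Gherkin line the step above matches, assuming a
# keyspace "medusa" and a table "test_table":
#
#     When I create the "test_table" table in keyspace "medusa"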
@when('I create the "{table_name}" table with secondary index in keyspace "{keyspace_name}"')
def _i_create_the_table_with_si(context, table_name, keyspace_name):
keyspace = """CREATE KEYSPACE IF NOT EXISTS {} WITH replication = {{'class':'SimpleStrategy',
'replication_factor':1}}"""
context.session.execute(keyspace.format(keyspace_name))
table = "CREATE TABLE IF NOT EXISTS {}.{} (id timeuuid PRIMARY KEY, value text);"
context.session.execute(table.format(keyspace_name, table_name))
si = "CREATE INDEX IF NOT EXISTS {}_idx ON {}.{} (value);"
context.session.execute(si.format(table_name, keyspace_name, table_name))
@when(r'I load {nb_rows} rows in the "{table_name}" table')
def _i_load_rows_in_the_whatever_table(context, nb_rows, table_name):
for i in range(int(nb_rows)):
context.session.execute(
"INSERT INTO {} (id, value) VALUES(now(), '{}')".format(table_name, i)
)
@when(r'I run a "{command}" command')
def _i_run_a_whatever_command(context, command):
os.popen(command).read()
@when(r'I perform a backup in "{backup_mode}" mode of the node named "{backup_name}"')
def _i_perform_a_backup_of_the_node_named_backupname(context, backup_mode, backup_name):
(actual_backup_duration, actual_start, end, node_backup, node_backup_cache, num_files, start) \
= medusa.backup.main(context.medusa_config, backup_name, None, backup_mode)
context.latest_backup_cache = node_backup_cache
@then(r'I can see the backup named "{backup_name}" when I list the backups')
def _i_can_see_the_backup_named_backupname_when_i_list_the_backups(
context, backup_name
):
storage = Storage(config=context.medusa_config.storage)
cluster_backups = storage.list_cluster_backups()
found = False
for backup in cluster_backups:
if backup.name == backup_name:
found = True
assert found is True
@then(r'some files from the previous backup were not reuploaded')
def _some_files_from_the_previous_backup_were_not_reuploaded(context):
assert context.latest_backup_cache.replaced > 0
@then(r'I cannot see the backup named "{backup_name}" when I list the backups')
def _i_cannot_see_the_backup_named_backupname_when_i_list_the_backups(
context, backup_name
):
storage = Storage(config=context.medusa_config.storage)
cluster_backups = storage.list_cluster_backups()
found = False
for backup in cluster_backups:
if backup.name == backup_name:
found = True
assert found is False
@then('I can see the backup status for "{backup_name}" when I run the status command')
def _i_can_see_backup_status_when_i_run_the_status_command(context, backup_name):
medusa.status.status(config=context.medusa_config, backup_name=backup_name)
@then(r"I can see no backups when I list the backups")
def _i_can_see_no_backups(context):
storage = Storage(config=context.medusa_config.storage)
cluster_backups = storage.list_cluster_backups()
assert 0 == len(list(cluster_backups))
@then(
r'the backup named "{backup_name}" has {nb_sstables} SSTables '
+ r'for the "{table_name}" table in keyspace "{keyspace}"'
)
def _the_backup_named_backupname_has_nb_sstables_for_the_whatever_table(
context, backup_name, nb_sstables, table_name, keyspace
):
storage = Storage(config=context.medusa_config.storage)
path = os.path.join(
context.medusa_config.storage.fqdn, backup_name, "data", keyspace, table_name
)
objects = storage.storage_driver.list_objects(path)
sstables = list(filter(lambda obj: "-Data.db" in obj.name, objects))
if len(sstables) != int(nb_sstables):
logging.error("{} SSTables : {}".format(len(sstables), sstables))
logging.error("Was expecting {} SSTables".format(nb_sstables))
assert len(sstables) == int(nb_sstables)
@then(r'I can verify the backup named "{backup_name}" successfully')
def _i_can_verify_the_backup_named_successfully(context, backup_name):
medusa.verify.verify(context.medusa_config, backup_name)
@when(r'I restore the backup named "{backup_name}"')
def _i_restore_the_backup_named(context, backup_name):
medusa.restore_node.restore_node(
context.medusa_config,
Path("/tmp"),
backup_name,
in_place=True,
keep_auth=False,
seeds=None,
verify=None,
keyspaces={},
tables={},
use_sstableloader=False,
)
@when(r'I restore the backup named "{backup_name}" with the sstableloader')
def _i_restore_the_backup_named_with_sstableloader(context, backup_name):
medusa.restore_node.restore_node(
context.medusa_config,
Path("/tmp"),
backup_name,
in_place=True,
keep_auth=False,
seeds=None,
verify=None,
keyspaces={},
tables={},
use_sstableloader=True,
)
@when(r'I restore the backup named "{backup_name}" for "{fqtn}" table')
def _i_restore_the_backup_named_for_table(context, backup_name, fqtn):
medusa.restore_node.restore_node(
context.medusa_config,
Path("/tmp"),
backup_name,
in_place=True,
keep_auth=False,
seeds=None,
verify=None,
keyspaces={},
tables={fqtn},
use_sstableloader=False,
)
@then(r'I have {nb_rows} rows in the "{table_name}" table')
def _i_have_rows_in_the_table(context, nb_rows, table_name):
context.session = connect_cassandra()
rows = context.session.execute("select count(*) as nb from {}".format(table_name))
assert rows[0].nb == int(nb_rows)
@then(r'I can see the backup index entry for "{backup_name}"')
def _the_backup_named_backupname_is_present_in_the_index(context, backup_name):
storage = Storage(config=context.medusa_config.storage)
fqdn = context.medusa_config.storage.fqdn
path = os.path.join(
"index/backup_index", backup_name, "tokenmap_{}.json".format(fqdn)
)
tokenmap_from_index = storage.storage_driver.get_blob_content_as_string(path)
path = os.path.join(fqdn, backup_name, "meta", "tokenmap.json")
tokenmap_from_backup = storage.storage_driver.get_blob_content_as_string(path)
# Check that we have the manifest as well there
manifest_path = os.path.join(
"index/backup_index", backup_name, "manifest_{}.json".format(fqdn)
)
manifest_from_index = storage.storage_driver.get_blob_content_as_string(
manifest_path
)
path = os.path.join(fqdn, backup_name, "meta", "manifest.json")
manifest_from_backup = storage.storage_driver.get_blob_content_as_string(path)
assert (
tokenmap_from_backup == tokenmap_from_index
and manifest_from_backup == manifest_from_index
)
@then(
r'I can see the latest backup for "{expected_fqdn}" being called "{expected_backup_name}"'
)
def _the_latest_backup_for_fqdn_is_called_backupname(
context, expected_fqdn, expected_backup_name
):
storage = Storage(config=context.medusa_config.storage)
latest_backup = storage.latest_node_backup(fqdn=expected_fqdn)
assert latest_backup.name == expected_backup_name
@then(r'there is no latest backup for node "{fqdn}"')
def _there_is_no_latest_backup_for_node_fqdn(context, fqdn):
storage = Storage(config=context.medusa_config.storage)
node_backup = storage.latest_node_backup(fqdn=fqdn)
assert node_backup is None
@when(
r'node "{fqdn}" fakes a complete backup named "{backup_name}" on "{backup_datetime}"'
)
def _node_fakes_a_complete_backup(context, fqdn, backup_name, backup_datetime):
path_root = "/tmp/medusa_it_bucket"
fake_tokenmap = json.dumps(
{
"n1": {"tokens": [1], "is_up": True},
"n2": {"tokens": [2], "is_up": True},
"n3": {"tokens": [3], "is_up": True},
}
)
dir_path = os.path.join(path_root, "index", "backup_index", backup_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# fake token map, manifest and schema in index
path_tokenmap = "{}/index/backup_index/{}/tokenmap_{}.json".format(
path_root, backup_name, fqdn
)
write_dummy_file(path_tokenmap, backup_datetime, fake_tokenmap)
path_manifest = "{}/index/backup_index/{}/manifest_{}.json".format(
path_root, backup_name, fqdn
)
write_dummy_file(path_manifest, backup_datetime, fake_tokenmap)
path_schema = "{}/index/backup_index/{}/schema_{}.cql".format(
path_root, backup_name, fqdn
)
write_dummy_file(path_schema, backup_datetime, fake_tokenmap)
dir_path = os.path.join(path_root, "index", "latest_backup", fqdn)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# fake token map in latest_backup
path_latest_backup_tokenmap = "{}/index/latest_backup/{}/tokenmap.json".format(
path_root, fqdn
)
write_dummy_file(path_latest_backup_tokenmap, backup_datetime, fake_tokenmap)
# fake token name in latest_backup
path_latest_backup_name = "{}/index/latest_backup/{}/backup_name.txt".format(
path_root, fqdn
)
write_dummy_file(path_latest_backup_name, backup_datetime)
# fake actual backup folder
dir_path = os.path.join(path_root, fqdn, backup_name, "meta")
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# fake schema in actual backup path
path_schema = "{}/{}/{}/meta/schema.cql".format(path_root, fqdn, backup_name)
write_dummy_file(path_schema, backup_datetime)
# fake manifest in actual backup path
path_manifest = "{}/{}/{}/meta/manifest.json".format(path_root, fqdn, backup_name)
write_dummy_file(path_manifest, backup_datetime)
# fake token map in actual backup path
path_tokenmap = "{}/{}/{}/meta/tokenmap.json".format(path_root, fqdn, backup_name)
write_dummy_file(path_tokenmap, backup_datetime, fake_tokenmap)
@then(r'the latest cluster backup is "{expected_backup_name}"')
def _the_latest_cluster_backup_is(context, expected_backup_name):
storage = Storage(config=context.medusa_config.storage)
backup = storage.latest_cluster_backup()
assert expected_backup_name == backup.name
@then(r"there is no latest complete backup")
def _there_is_no_latest_complete_backup(context):
storage = Storage(config=context.medusa_config.storage)
actual_backup = storage.latest_complete_cluster_backup()
assert actual_backup is None
@then(r"I can list and print backups without errors")
def _can_list_print_backups_without_error(context):
medusa.listing.list_backups(config=context.medusa_config, show_all=True)
@then(r'the latest complete cluster backup is "{expected_backup_name}"')
def _the_latest_complete_cluster_backup_is(context, expected_backup_name):
storage = Storage(config=context.medusa_config.storage)
actual_backup = storage.latest_complete_cluster_backup()
if actual_backup is not None:
assert expected_backup_name == actual_backup.name
@when(r"I truncate the backup index")
def _truncate_the_index(context):
path_root = "/tmp/medusa_it_bucket"
index_path = "{}/index".format(path_root)
shutil.rmtree(index_path)
@when(r"I truncate the backup folder")
def _truncate_the_backup_folder(context):
path_root = "/tmp/medusa_it_bucket"
backup_path = "{}/localhost".format(path_root)
shutil.rmtree(backup_path)
@when(r"I re-create the backup index")
def _recreate_the_index(context):
medusa.index.build_indices(context.medusa_config, False)
@then(r"I can report latest backups without errors")
def _can_report_backups_without_errors(context):
medusa.report_latest.report_latest(config=context.medusa_config, push_metrics=True)
@then(r"the backup index does not exist")
def _the_backup_index_does_not_exist(context):
storage = Storage(config=context.medusa_config.storage)
assert False is medusa.index.index_exists(storage)
@then(r"the backup index exists")
def _the_backup_index_exists(context):
storage = Storage(config=context.medusa_config.storage)
assert True is medusa.index.index_exists(storage)
@then(
r'I can see {nb_sstables} SSTables in the SSTable pool for the "{table_name}" table in keyspace "{keyspace}"'
)
def _i_can_see_nb_sstables_in_the_sstable_pool(
context, nb_sstables, table_name, keyspace
):
storage = Storage(config=context.medusa_config.storage)
path = os.path.join(
context.medusa_config.storage.fqdn, "data", keyspace, table_name
)
objects = storage.storage_driver.list_objects(path)
sstables = list(filter(lambda obj: "-Data.db" in obj.name, objects))
if len(sstables) != int(nb_sstables):
logging.error("{} SSTables : {}".format(len(sstables), sstables))
logging.error("Was expecting {} SSTables".format(nb_sstables))
assert len(sstables) == int(nb_sstables)
@then(
r'backup named "{backup_name}" has {nb_files} files '
+ r'in the manifest for the "{table_name}" table in keyspace "{keyspace_name}"'
)
def _backup_named_something_has_nb_files_in_the_manifest(
context, backup_name, nb_files, table_name, keyspace_name
):
storage = Storage(config=context.medusa_config.storage)
node_backups = storage.list_node_backups()
# Find the backup we're looking for
target_backup = list(
filter(lambda backup: backup.name == backup_name, node_backups)
)[0]
# Parse its manifest
manifest = json.loads(target_backup.manifest)
for section in manifest:
if (
section["keyspace"] == keyspace_name
and section["columnfamily"][: len(table_name)] == table_name
):
if len(section["objects"]) != int(nb_files):
logging.error(
"Was expecting {} files, got {}".format(
nb_files, len(section["objects"])
)
)
assert len(section["objects"]) == int(nb_files)
@then(r'I can see secondary index files in the "{backup_name}" files')
def _i_can_see_secondary_index_files_in_backup(context, backup_name):
storage = Storage(config=context.medusa_config.storage)
node_backups = storage.list_node_backups()
target_backup = list(filter(lambda backup: backup.name == backup_name, node_backups))[0]
manifest = json.loads(target_backup.manifest)
seen_index_files = 0
for section in manifest:
for f in section['objects']:
if 'idx' in f['path']:
seen_index_files += 1
assert seen_index_files > 0
@then(r'verify fails on the backup named "{backup_name}"')
def _verify_fails_on_the_backup_named(context, backup_name):
try:
medusa.verify.verify(context.medusa_config, backup_name)
raise AssertionError("Backup verification should have failed but didn't.")
except RuntimeError:
# This exception is required to be raised to validate the step
pass
@when(r"I purge the backup history to retain only {backup_count} backups")
def _i_purge_the_backup_history_to_retain_only_nb_backups(context, backup_count):
medusa.purge.main(context.medusa_config, max_backup_count=int(backup_count))
@then(r"I see {metrics_count} metrics emitted")
def _i_see_metrics_emitted(context, metrics_count):
metrics = list(LocalMonitoring(context.medusa_config).load_metrics())
logging.info("There is {} metrics".format(len(metrics)))
logging.info("The metrics are: {}".format(metrics))
assert int(len(metrics)) == int(metrics_count)
@when(r'I truncate the "{table_name}" table')
def _i_truncate_the_table(context, table_name):
context.session = connect_cassandra()
context.session.execute("truncate {}".format(table_name))
@then(r'I can verify the restore verify query "{query}" returned {expected_rows} rows')
def _i_can_verify_the_restore_verify_query_returned_rows(context, query, expected_rows):
restore_config = {
"health_check": "cql",
"query": query,
"expected_rows": expected_rows,
}
custom_config = MedusaConfig(
storage=context.medusa_config.storage,
cassandra=context.medusa_config.cassandra,
monitoring=context.medusa_config.monitoring,
restore=_namedtuple_from_dict(ChecksConfig, restore_config),
ssh=None,
)
medusa.verify_restore.verify_restore(["localhost"], custom_config)
def connect_cassandra():
connected = False
attempt = 0
session = None
while not connected and attempt < 10:
try:
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
connected = True
except cassandra.cluster.NoHostAvailable:
attempt += 1
time.sleep(10)
return session
def write_dummy_file(path, mtime_str, contents=None):
# create the file. if there's some contents, write them too
with open(path, "w") as f:
if contents is not None:
f.write(contents)
f.flush()
f.close()
# we set the access and modification times for the file we just created
# this time is set as seconds since epoch
t = datetime.datetime.strptime(mtime_str, "%Y-%m-%d %H:%M:%S")
mtime = (t - datetime.datetime(1970, 1, 1)).total_seconds()
atime = mtime
os.utime(path, (atime, mtime))
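# Usage sketch (hedged): a minimal illustration of how write_dummy_file stamps a
# file with a given modification time; the path below is hypothetical.
def _example_write_dummy_file():
    path = "/tmp/medusa_example_dummy.txt"
    write_dummy_file(path, "2020-01-01 00:00:00", contents="{}")
    # the mtime is stored as seconds since the epoch, so 2020-01-01 00:00:00
    # corresponds to 1577836800.0
    assert os.path.getmtime(path) == 1577836800.0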
| 36.089838 | 113 | 0.686186 |
e964e12273eb2acbc3121f3ff86b38ea634e0132 | 54920 | py | Python | raster_handler/rh.py | gustavoalens/raster_handler | 513760b0ef085af40b76162a0c5f9d6ffa2d4a94 | ["MIT"] | null | null | null | raster_handler/rh.py | gustavoalens/raster_handler | 513760b0ef085af40b76162a0c5f9d6ffa2d4a94 | ["MIT"] | null | null | null | raster_handler/rh.py | gustavoalens/raster_handler | 513760b0ef085af40b76162a0c5f9d6ffa2d4a94 | ["MIT"] | null | null | null |
from osgeo import gdal, ogr
from skimage import segmentation
from skimage.exposure import histogram, adjust_gamma, equalize_hist
from skimage.feature import local_binary_pattern, hog, greycomatrix, greycoprops
from skimage.filters import median, gaussian, sobel, laplace
from skimage import img_as_uint, img_as_ubyte, img_as_float64
from scipy.stats import entropy, kurtosis
from scipy.ndimage import variance
from scipy.fftpack import fft2, ifft2, fftshift, ifftshift
import pandas as pd
import tensorflow as tf
from sklearn import svm
from sklearn import metrics
from typing import Union
import numpy as np
import os
from tempfile import gettempdir
"""
"""
# tf.compat.v1.enable_eager_execution()
# - Constantes - #
TIFF = 'GTiff'
RNA = 0
SVM = 1
FT_GAMA = 0
FT_EQLHIST = 1
FT_MEDIANA = 2
FT_GAUSS = 3
FT_SOBEL = 4
FT_LAPLACE = 5
FT_FPB_IDEAL = 6
FT_FPA_IDEAL = 7
CI_RGB = {1: gdal.GCI_RedBand, 2: gdal.GCI_GreenBand, 3: gdal.GCI_BlueBand}
DTYPE_CVT = {gdal.GDT_Byte: np.uint8, gdal.GDT_UInt16: np.uint16}
# - Funções de Manipulação de Raster - #
def get_bandas(raster):
"""
Verifica se o arquivo é um gdal.Dataset e retorna apenas a banda ou lista de bandas
Args:
raster (Union[gdal.Dataset, gdal.Band]): gdal.Dataset ou gdal.Band que será checado
Returns:
Union[list of gdal.Band, gdal.Band]: gdal.Band ou uma lista de gdal.Band com cada banda encontrada no gdal.Dataset
"""
if type(raster) is gdal.Dataset:
# contagem de bandas no raster
raster_qtd = raster.RasterCount
if raster_qtd == 1:
return raster.GetRasterBand(1)
else:
bands = list()
for b in range(1, raster_qtd + 1):
bands.append(raster.GetRasterBand(b))
return bands
elif type(raster) is gdal.Band:
return raster
else:
print('Erro: Não é Band nem Dataset')
return None
def check_resol_radio(rasters):
"""
Verifica se os rasters estão na mesma resolução radiométrica para manipulações deste
Args:
rasters (Union[list of gdal.Dataset, list of gdal.Band]):
Returns:
bool: True se os rasters/bandas estão na mesma resolução
"""
    if type(rasters) is not list:
        # wrap a single Dataset/Band in a list instead of trying to iterate over it
        rasters = [rasters]
rradios = set()
for raster in rasters:
tp = type(raster)
if tp is gdal.Dataset:
for b in range(1, raster.RasterCount + 1):
rradios.add(raster.GetRasterBand(b).DataType)
elif tp is gdal.Band:
rradios.add(raster.DataType)
else:
print('Erro no tipo de arquivo')
return None
print(rradios)
if len(rradios) > 1:
return False
return True
def check_projection(rasters):
"""
Args:
rasters (list of gdal.Dataset):
Returns:
bool: True se rasters possuem mesma projeção e False se não
"""
if type(rasters) is not list:
print("Erro de tipo")
return None
projs = set()
for raster in rasters:
if type(raster) is not gdal.Dataset:
print("Erro de tipo")
return None
else:
projs.add(raster.GetProjection())
if len(projs) > 1:
return False
return True
def check_num_bandas(rasters):
"""
Args:
rasters:
Returns:
int: total de bandas ou 0 se numero de bandas são diferentes ou vazia
"""
tp = type(rasters)
if tp is gdal.Dataset:
tot = rasters.RasterCount
elif tp is gdal.Band:
tot = 1
elif tp is list:
tot = rasters[0].RasterCount
for raster in rasters[1:]:
if type(raster) is not gdal.Dataset:
print("Erro de tipo")
return None
else:
if raster.RasterCount != tot:
return 0
else:
print("Erro de tipo")
return None
return tot
def cria_destino(path, nome, desc, ext='tif', extra=None):
""" Cria caminho de onde será salvo algum Dataset.
Args:
path (str): diretório do arquivo
nome (str): nome do arquivo
ext (str): extensão que será salvo o arquivo
        desc (str): caminho de um Dataset que será manipulado (usado caso não seja passado path ou nome)
extra (str): informação acrescentada depois do nome do arquivo
Returns:
str: caminho do arquivo
"""
if not nome:
nome = f'{os.path.splitext(os.path.basename(desc))[0]}'
if extra:
nome += f'_{extra}'
if not path:
path = os.path.dirname(desc)
return f'{path}/{nome}.{ext}'
def csv_str_list2list(str_list, tipo):
"""
Função para converter lista salva em csv para o tipo correto dos itens
Args:
str_list (str): lista em string, geralmente como fica salvo lista em csv pelo Pandas
tipo (function): função de conversão do tipo. Ex: float, int
Returns (list): Retorna uma lista com os itens do tipo do parâmetro
"""
# ToDo: checar se é lista e conv é função
return [tipo(x) for x in str_list.strip('[]').split(', ')]
def compor_rgb(r, g, b, ext=TIFF, path=None, nome=None):
"""
Une 3 rasters de única banda em um raster de composição colorida RGB
Args:
r (Union[gdal.Dataset, gdal.Band]): gdal.Dataset referente a banda Red
g (Union[gdal.Dataset, gdal.Band]): gdal.Dataset referente a banda Green
b (Union[gdal.Dataset, gdal.Band]): gdal.Dataset referente a banda Blue
ext (str): gdal.Driver name. Default = 'GTiff'
path (str): caminho do diretório de saída do arquivo raster
nome (str): nome para o raster. Default = gerará automaticamente
Returns:
gdal.Dataset: gdal.Dataset com 3 rasters referente a composição colorida RGB
"""
# - checagem de possiveis erros
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
gd_r = get_bandas(r)
if not gd_r or type(gd_r) is list:
print('Erro no arquivo raster')
return None # ou padronizar erros - Erro no arquivo raster
gd_g = get_bandas(g)
if not gd_g or type(gd_g) is list:
print('Erro no arquivo raster')
return None # ou padronizar erros - Erro no arquivo raster
gd_b = get_bandas(b)
if not gd_b or type(gd_b) is list:
print('Erro no arquivo raster')
return None # ou padronizar erros - Erro no arquivo raster
if not check_resol_radio([gd_r, gd_g, gd_b]):
print('Rasters com resoluções radiométrica diferentes')
return None # padronizar erros - Rasters com resoluções radiométrica diferentes
# salvando o tipo de dado do raster, referente a resolução radiométrica
dtype = gd_r.DataType
# salvando informações geográficas do raster
col = r.RasterXSize
row = r.RasterYSize
geo_transf = r.GetGeoTransform()
proj = r.GetProjection()
if col != g.RasterXSize or col != b.RasterXSize or\
row != g.RasterYSize or row != b.RasterYSize:
print('Rasters em posições diferentes')
return None # erro - Rasters em posições diferentes
if geo_transf != g.GetGeoTransform() or geo_transf != b.GetGeoTransform(): # pode dar sempre diferente
print('Rasters com geotransformações diferentes')
return None # erro - Rasters com geotransformações diferentes
if not check_projection([r, g, b]):
print('Rasters com projeções diferentes')
return None # erro - Rasters com projeções diferentes
# criando novo arquivo raster da composição
driver = gdal.GetDriverByName(ext)
dest = cria_destino(path, nome, r.GetDescription(), extra='comp_RGB')
comp = driver.Create(dest, col, row, 3, dtype, ['PHOTOMETRIC=RGB'])
# adicionando as informações geográficas
comp.SetGeoTransform(geo_transf)
comp.SetProjection(proj)
# escrevendo os dados das bandas no raster
bands = [gd_r, gd_g, gd_b]
for b in range(3):
rb = b + 1
comp.GetRasterBand(rb).WriteArray(bands[b].ReadAsArray(0, 0, col, row))
# atualizando as alterações no raster
comp.FlushCache()
return comp
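# Usage sketch (hedged): one way compor_rgb might be called with three
# single-band GeoTIFFs; the paths are parameters, nothing is assumed to exist.
def _exemplo_compor_rgb(path_r, path_g, path_b, path_saida=None):
    r = gdal.Open(path_r)
    g = gdal.Open(path_g)
    b = gdal.Open(path_b)
    if r is None or g is None or b is None:
        print('Erro ao abrir um dos rasters')
        return None
    # the three inputs must share radiometric resolution, extent and projection
    return compor_rgb(r, g, b, path=path_saida, nome='composicao_rgb')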
def alterar_ref_espacial(raster, ref_nova, path=None, nome=None):
"""
Altera a referência espacial do Dataset, utilizando o padrão EPSG
Args:
raster (gdal.Dataset): gdal.Dataset que se deseja alterar referência
ref_nova (Union[int, str]): tipo de referência pelo padrão EPSG. Exemplo: 4628
path (str): caminho do diretório de saída do arquivo raster. Default:
nome (str): nome para o raster. Default: {nome atual}_EPSG:{ref_nova}
Returns:
gdal.Dataset: gdal.Dataset atualizado com nova referência espacial
"""
# checando possíveis erros
if type(raster) is not gdal.Dataset:
print('Não é um gdal.Dataset')
return None
if type(ref_nova) is not int:
try:
ref_nova = int(ref_nova)
except TypeError:
print('Projeção precisa ser só o valor inteiro do padrão EPSG')
return None
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
# criando caminho do arquivo que será gerado
dest = cria_destino(path, nome, raster.GetDescription(), extra=f'EPSG-{ref_nova}')
# executando a função de utilidade Warp para atualizar referencia espacial
raster_ref = gdal.Warp(dest, raster, dstSRS=f'EPSG:{ref_nova}', outputType=raster.GetRasterBand(1).DataType)
if not raster_ref:
print('Erro na projeção')
return raster_ref
def f16t8bits(raster, noData=0, path=None, nome=None):
"""
Converte a resolução radiométrica de 16 bits para 8 bits de forma escalável
Args:
raster (gdal.Dataset): raster de entrada para conversão
path (str):
nome (str):
Returns:
gdal.Dataset: raster convertido para 8 bits
"""
# checando possíveis erros
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
if type(raster) is not gdal.Dataset:
print("Erro de tipo")
return None
if raster.GetRasterBand(1).DataType == gdal.GDT_Byte:
print("Já está em 8 bits")
return raster
if raster.GetRasterBand(1).DataType != gdal.GDT_UInt16:
print("Não é 16 bits")
return None
# criando caminho do arquivo que será gerado
dest = cria_destino(path, nome, raster.GetDescription(), extra='8bits')
# executando a função de utilidade Warp para atualizar resolução radiométrica
nraster = gdal.Translate(dest, raster, scaleParams=[0, 65535, 0, 255], outputType=gdal.GDT_Byte, noData=noData)
return nraster
def mosaicar(rasters, nodata_value=0, path=None, nome=None):
"""
Constrói mosaico de rasters utilizando gdal.Warp.
Necessário que os rasters estejam com mesma resolução radiométrica, sistema de coordenadas e numero de bandas
Args:
rasters (list of gdal.Dataset): lista dos raster que serão mosaicados
nodata_value (int): valor que será considerado nulo
path (str): caminho do diretório de saída do arquivo raster
nome (str): nome para o raster
Returns:
gdal.Dataset: gdal.Dataset com o raster final mosaicado
"""
# checagem de possíveis erros
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
if type(rasters) is not list or len(rasters) < 2:
print('Não é uma lista de rasters para mosaicar')
return None
if not check_resol_radio(rasters):
print('Rasters com resoluções radiométrica diferentes')
return None # padronizar erros - Rasters com resoluções radiométrica diferentes
if not check_projection(rasters): # talvez tenha que usar a classe de projeção pra comparar de fato já que tem posições diferentes (por ora parece que nao)
print('Rasters com projeções diferentes')
return None # erro - Rasters com projeções diferentes
if not check_num_bandas(rasters):
print('Rasters com quantidade diferente de bandas')
return None
# prepara diretório que salvará o raster mosaicado
dest = cria_destino(path, nome, rasters[0].GetDescription(), extra='mosaico')
# utiliza função de utilidade Warp
msc = gdal.Warp(dest, rasters, srcNodata=nodata_value, dstNodata=nodata_value, multithread=True)
# define a forma de interpretação de cor do raster
rc = msc.RasterCount
if msc: # Substituir pra try e expect
if rc == 1:
msc.GetRasterBand(1).SetColorInterpretation(gdal.GCI_GrayIndex)
else:
msc.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)
msc.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)
msc.GetRasterBand(3).SetColorInterpretation(gdal.GCI_BlueBand)
msc.FlushCache()
else:
print('Erro no mosaico')
return msc
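# Usage sketch (hedged): mosaicking a list of scenes that already share
# radiometric resolution, projection and band count; the paths are parameters.
def _exemplo_mosaicar(caminhos, path_saida=None):
    rasters = [gdal.Open(c) for c in caminhos]
    if any(r is None for r in rasters):
        print('Erro ao abrir algum raster')
        return None
    return mosaicar(rasters, nodata_value=0, path=path_saida, nome='mosaico_exemplo')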
def recortar(raster, shape_path, where=None, nodata_value=0, path=None, nome=None):
"""Recorta um raster a partir de um shape
Args:
raster (gdal.Dataset): raster que sofrerá o recorte
shape_path (str): diretório do shape + nome
where (str): cláusula where no shape para recorte
nodata_value (int): valor considerado sem dado do raster
path (str): diretório que será salvo
nome (str): nome do arquivo que será salvo
Returns:
gdal.Dataset: raster recortado pelo shape
"""
# checagem de possíveis erros
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
    if not os.path.isfile(shape_path):
        print('Erro no arquivo/diretório')
        return None
# prepara diretório que salvará o raster mosaicado
dest = cria_destino(path, nome, raster.GetDescription(), extra='croped')
# utiliza função de utilidade Warp
rec = gdal.Warp(dest, raster, cutlineDSName=shape_path, srcNodata=nodata_value, dstNodata=nodata_value,
cutlineWhere=where, cropToCutline=True, outputType=raster.GetRasterBand(1).DataType,
multithread=True)
if not rec:
print('Erro no recorte')
return None
rec.FlushCache()
return rec
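# Usage sketch (hedged): clipping a raster with a shapefile, optionally filtered
# by an attribute; the field name in the where clause is purely illustrative.
def _exemplo_recortar(path_raster, path_shape, path_saida=None):
    raster = gdal.Open(path_raster)
    if raster is None:
        print('Erro ao abrir o raster')
        return None
    return recortar(raster, path_shape, where="NM_MUNICIP = 'EXEMPLO'",
                    nodata_value=0, path=path_saida, nome='recorte_exemplo')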
def shp2raster(shape, pixel_wh=30, field=None, path=None, nome=None):
"""Converte um arquivo .shp (ESRI shapefile) em um arquivo raster de acordo com as classes do field escolhido
Args:
shape (Union[ogr.DataSource, str]): caminho do arquivo raster ou raster já carregado por drive GDAL
pixel_wh (int): precisão do pixel, referente a resolução espacial. em m (30m = bandas multiespectrais landsat)
field (str): nome do campo referência para conversão
path (str): caminho do diretório onde será salvo o raster
nome (str): nome do arquivo raster
Returns:
(gdal.Dataset, dict): raster codificado resultante da conversão do shape e dicionário dos códigos das classes
"""
# Checando tipos e possíveis erros
tp_shp = type(shape)
if tp_shp is str:
shp = ogr.Open(shape, 0)
elif tp_shp is ogr.DataSource:
shp = shape
else:
print("Erro de tipo")
return None
dest = cria_destino(path, nome, shp.GetDescription(), extra='shp2ras')
if not dest:
print('Erro: Arquivo destino não foi criado')
return None
lyr_shp = shp.GetLayer()
ldef_shp = lyr_shp.GetLayerDefn()
fi_shp = ldef_shp.GetFieldIndex(field) # numero de indíce do field referência
if fi_shp < 0:
print('Erro: field não existe')
return None
# criando shape temporário (a API GDAL não permite salvar alterações no arquivo principal
# e prevenir erros no arquivo fonte)
driver = ogr.GetDriverByName('ESRI Shapefile')
shp_aux = driver.CreateDataSource(os.path.join(gettempdir(), 'tmp.shp'))
# criando layer do shape auxiliar com mesmas características do shape fonte
lyr_shp_aux = shp_aux.CreateLayer(lyr_shp.GetName(), lyr_shp.GetSpatialRef(), lyr_shp.GetGeomType())
ldef_shp_aux = lyr_shp_aux.GetLayerDefn()
# copiando configuração do campo geométrico do shape para o shape auxiliar
geom_old = ldef_shp.GetGeomFieldDefn(0)
geom_new = ogr.GeomFieldDefn(geom_old.GetName())
geom_new.SetType(geom_old.GetType())
geom_new.SetSpatialRef(geom_old.GetSpatialRef())
# criando o field que será salvo os códigos no shape auxiliar
field_new = ogr.FieldDefn('code')
field_new.SetType(0)
ldef_shp_aux.AddFieldDefn(field_new)
# adicionando os fields no layer do shape auxiliar
ldef_shp_aux.AddGeomFieldDefn(geom_new)
fi_shp_aux = ldef_shp_aux.GetFieldIndex('code')
# checando o tipo de dado do field
field_type = ldef_shp.GetFieldDefn(fi_shp).type
# auxiliar para criar codificação caso já não seja codificado o field referência
id_aux = 1
# dicionário dos códigos das classes
dic_codes = dict()
for i in range(lyr_shp.GetFeatureCount()):
# salvando a classe da feature atual
ftre_old = lyr_shp.GetFeature(i)
classe = ftre_old.GetField(fi_shp)
# salvando no dicionário caso a classe ainda não tinha sido carregada
if classe not in dic_codes:
if field_type == 0: # caso já seja um campo codificado
dic_codes[classe] = classe
else:
dic_codes[classe] = id_aux
id_aux += 1
# criando a feature a ser inserida no shape auxiliar
ftre = ogr.Feature(ldef_shp_aux)
# adicionando o codigo e geometria da feature
ftre.SetField(fi_shp_aux, dic_codes[classe])
ftre.SetGeometry(ftre_old.GetGeometryRef())
# adicionando a feature no shape auxiliar
lyr_shp_aux.CreateFeature(ftre)
# extent do shape auxiliar que será utilizado como referência pra criar o raster
x_mn, x_mx, y_mn, y_mx = lyr_shp.GetExtent()
print(lyr_shp.GetExtent())
cols = int((x_mx - x_mn) / pixel_wh)
rows = int((y_mx - y_mn) / pixel_wh)
if not cols or not rows:
print('Coluna ou Largura está zerada')
return None
if len(dic_codes) < 256:
dtype = gdal.GDT_Byte
else:
dtype = gdal.GDT_UInt16
# criando e preparando as informações geográficas do Raster
raster = gdal.GetDriverByName('GTiff').Create(dest, cols, rows, 1, dtype)
raster.SetGeoTransform((x_mn, pixel_wh, 0, y_mx, 0, -pixel_wh))
raster.SetProjection(lyr_shp.GetSpatialRef().ExportToWkt())
raster.GetRasterBand(1).SetNoDataValue(0)
# transformando o shape auxiliar em raster
gdal.RasterizeLayer(raster, [1], lyr_shp_aux, options=['ATTRIBUTE=code'])
# atualizando as alterações no raster
raster.FlushCache()
# excluindo o shape auxiliar
path_t = shp_aux.GetDescription()
shp_aux = None
os.remove(path_t)
return raster, dic_codes
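# Usage sketch (hedged): rasterizing a shapefile by a class field; the field
# name 'classe' is an assumption about the input shapefile.
def _exemplo_shp2raster(path_shape, path_saida=None):
    resultado = shp2raster(path_shape, pixel_wh=30, field='classe',
                           path=path_saida, nome='verdade_terrestre')
    if resultado is None:
        return None
    raster_codificado, codigos = resultado
    print('Códigos das classes:', codigos)
    return raster_codificado, codigos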
def gdal2nparray(raster):
"""
Args:
raster (gdal.Dataset):
Returns: np.ndarray:
"""
raster_t = type(raster)
    if raster_t is gdal.Dataset:
        bandas = get_bandas(raster)
        if type(bandas) is not list:
            # single-band raster: get_bandas returns the gdal.Band directly
            np_raster = bandas.ReadAsArray()
        else:
            if len(bandas) > 3:
                print('Utilizando somente as 3 primeiras bandas')
                bandas = bandas[:3]
            for i in range(len(bandas)):
                bandas[i] = bandas[i].ReadAsArray()
            np_raster = np.dstack(tuple(bandas))
elif raster_t is np.ndarray:
np_raster = raster
else:
print('Erro no tipo')
return None
return np_raster
def segmentar(raster, scale, min_size, path=None, nome=None):
"""Segmenta um raster em regiões por meio do algoritmo felzenszwalb
Args:
raster (gdal.Dataset): Raster a ser segmentado
scale (int): Scale do algoritmo felzenszwalb
min_size (int): min_size do algoritmo felzenszwalb
path (str): diretório do arquivo
nome (str): nome do arquivo
Returns:
(gdal.Dataset): Raster segmentado, onde cada região terá um valor de identificação único em seus pixels.
"""
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
np_raster = gdal2nparray(raster)
    if np_raster is not None:
np_seg = segmentation.felzenszwalb(np_raster, scale=scale, min_size=min_size)
np_seg = np_seg + 1
dest = cria_destino(path, nome, raster.GetDescription(), extra='segmentation')
seg = gdal.GetDriverByName('GTiff').Create(dest, raster.RasterXSize, raster.RasterYSize, 1, gdal.GDT_UInt16)
seg.SetGeoTransform(raster.GetGeoTransform())
seg.SetProjection(raster.GetProjection())
seg.GetRasterBand(1).WriteArray(np_seg)
seg.FlushCache()
if not seg:
print('Erro ao criar raster')
return None
return seg
return None
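# Usage sketch (hedged): Felzenszwalb segmentation of an RGB composite; the
# scale and min_size values are arbitrary starting points, not tuned defaults.
def _exemplo_segmentar(path_raster, path_saida=None):
    raster = gdal.Open(path_raster)
    if raster is None:
        print('Erro ao abrir o raster')
        return None
    return segmentar(raster, scale=100, min_size=50, path=path_saida, nome='segmentos')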
def calc_total_pixels(banda):
larg, alt = banda.shape
return larg * alt
def calc_assimetria(banda, mean, std, npix):
soma = banda - mean
soma = soma ** 3
ndvp = std ** 3 * npix
return (1/ndvp) * np.sum(soma)
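# Sanity-check sketch (hedged): calc_assimetria implements the population
# skewness sum((x - mean)**3) / (n * std**3), so with ddof=0 statistics it
# should agree with scipy.stats.skew on the flattened band.
def _demo_calc_assimetria():
    from scipy.stats import skew
    banda = np.arange(100, dtype=np.float64).reshape(10, 10) ** 1.5
    valor = calc_assimetria(banda, np.mean(banda), np.std(banda), calc_total_pixels(banda))
    assert np.isclose(valor, skew(banda.ravel()))
    return valor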
def extrair_carac_regiao(regiao, caracteristicas, params=None):
"""
    Extração das características:
- hog: Histogram of Oriented Gradients
- media: media dos valores do pixel de cada banda
- dsv_p: desvio padrão dos valores do pixel de cada banda
- ast: assimetria
- var: variancia
- ent: entropia
- crt: curtose
- glcm: Grey level Co-occurrence Matrix (contraste, dissimilaridade, homogeneidade, ASM, energia, correlação)
- lbp: Local Binary Pattern
Args:
regiao (Union[np.ndarray, np.ma.core.MaskedArray]): região de imagem que será extraída as características
caracteristicas (list of str): lista de características a serem extraídas
params (dict): parametros para o algoritmo de extração
Returns:
        (pd.DataFrame): DataFrame de uma linha com as características extraídas da região
"""
features = pd.DataFrame()
b1 = regiao[..., 0]
b2 = regiao[..., 1]
b3 = regiao[..., 2]
# total de pixels
b1_npix = calc_total_pixels(b1)
b2_npix = calc_total_pixels(b2)
b3_npix = calc_total_pixels(b3)
# média
b1_mean = b2_mean = b3_mean = mean = None
    if 'media' in caracteristicas:
        b1_mean = np.mean(b1)
        b2_mean = np.mean(b2)
        b3_mean = np.mean(b3)
        # overall mean of the region across all bands
        mean = np.mean(regiao)
        features = features.assign(media=[[b1_mean, b2_mean, b3_mean, mean]])
    b1_std = b2_std = b3_std = std = None
    # desvio padrão
    if 'dsv_p' in caracteristicas:
        b1_std = np.std(b1)
        b2_std = np.std(b2)
        b3_std = np.std(b3)
        # overall standard deviation of the region across all bands
        std = np.std(regiao)
        features = features.assign(dsv_p=[[b1_std, b2_std, b3_std, std]])
# assimetria
if 'ast' in caracteristicas:
if not b1_mean:
b1_mean = np.mean(b1)
b2_mean = np.mean(b2)
b3_mean = np.mean(b3)
if not b1_std:
b1_std = np.std(b1)
b2_std = np.std(b2)
b3_std = np.std(b3)
b1_ast = calc_assimetria(b1, b1_mean, b1_std, b1_npix)
b2_ast = calc_assimetria(b2, b2_mean, b2_std, b2_npix)
b3_ast = calc_assimetria(b3, b3_mean, b3_std, b3_npix)
features = features.assign(ast=[[b1_ast, b2_ast, b3_ast]])
# variancia
if 'var' in caracteristicas:
        b1_var = variance(b1)
        b2_var = variance(b2)
        b3_var = variance(b3)
features = features.assign(var=[[b1_var, b2_var, b3_var]])
# histograma
    if 'ent' in caracteristicas or 'crt' in caracteristicas:
b1_hst, _ = histogram(b1, nbins=b1.max()) # alterar nbins de acordo com dtype
b2_hst, _ = histogram(b2, nbins=b2.max()) # alterar nbins de acordo com dtype
b3_hst, _ = histogram(b3, nbins=b3.max()) # alterar nbins de acordo com dtype
# entropia - hst
if 'ent' in caracteristicas:
b1_ent = entropy(b1_hst)
b2_ent = entropy(b2_hst)
b3_ent = entropy(b3_hst)
features = features.assign(ent=[[b1_ent, b2_ent, b3_ent]])
# curtose - hst
if 'crt' in caracteristicas:
b1_crt = kurtosis(b1_hst)
b2_crt = kurtosis(b2_hst)
b3_crt = kurtosis(b3_hst)
features = features.assign(crt=[[b1_crt, b2_crt, b3_crt]])
if not params:
params = dict()
if type(params) is not dict:
print('Params precisa ser do tipo dict')
return None
# lbp
if 'lbp' in caracteristicas:
if 'P' in params:
p = params['P']
if type(p) is not int:
print('P precisa ser um valor inteiro')
return None
else:
p = 8
if 'R' in params:
r = params['R']
if type(r) is not int and type(r) is not float:
print('R precisa ser um valor float')
return None
else:
r = 1
b1_lbp = local_binary_pattern(b1, p, r, method='ror')
b2_lbp = local_binary_pattern(b2, p, r, method='ror')
b3_lbp = local_binary_pattern(b3, p, r, method='ror')
b1_lbp_h, _ = histogram(b1_lbp.ravel())
b2_lbp_h, _ = histogram(b2_lbp.ravel())
b3_lbp_h, _ = histogram(b3_lbp.ravel())
b1_min = b1_lbp_h.min()
b2_min = b2_lbp_h.min()
b3_min = b3_lbp_h.min()
# ToDo: melhorar normalização
# -> checar se só serão utilizados o primeiro e o último do histograma
# (possivelmente mudar o nbins do histogram resolve)
b1_lbp_h = (b1_lbp_h - b1_min) / (b1_lbp_h.max() - b1_min)
b2_lbp_h = (b2_lbp_h - b2_min) / (b2_lbp_h.max() - b2_min)
b3_lbp_h = (b3_lbp_h - b3_min) / (b3_lbp_h.max() - b3_min)
features = features.assign(lbp_b1=[list(b1_lbp_h)], lbp_b2=[list(b2_lbp_h)], lbp_b3=[list(b3_lbp_h)])
# hog
if 'hog' in caracteristicas:
if 'pixels_per_cell' in params:
pixels_per_cell = params['pixels_per_cell']
            if type(pixels_per_cell) is not tuple or len(pixels_per_cell) != 2:
print('Erro no parametro pixels_per_cell. Ex: (8, 8)')
return None
else:
pixels_per_cell = (8, 8)
        if 'cells_per_block' in params:
            cells_per_block = params['cells_per_block']
            if type(cells_per_block) is not tuple or len(cells_per_block) != 2:
print('Erro no parametro cells_per_block. Ex: (3, 3)')
return None
else:
cells_per_block = (8, 8)
h = hog(regiao, block_norm='L2-Hys', visualize=False, feature_vector=True, multichannel=True,
pixels_per_cell=pixels_per_cell, cells_per_block=cells_per_block)
features = features.assign(hog=[list(h)])
# glcm
if 'glcm' in caracteristicas:
if 'distances' in params:
distances = params['distances']
if type(distances) is list:
for dist in distances:
if type(dist) is not int:
print('Valores de distancia devem ser inteiros')
return None
else:
print('Distancia devem estar em lista')
return None
else:
distances = [1]
if 'angles' in params:
angles = params['angles']
if type(angles) is list:
for ang in angles:
if type(ang) is not float and type(ang) is not int:
print('Valores de angulos devem ser float')
return None
else:
print('Angulos devem estar em lista')
return None
else:
angles = [np.pi / 2]
b1_glcm = greycomatrix(b1, distances, angles, levels=b1.max() + 1)
b2_glcm = greycomatrix(b2, distances, angles, levels=b2.max() + 1)
b3_glcm = greycomatrix(b3, distances, angles, levels=b3.max() + 1)
glcm_res = list()
# contrast
glcm_res.append(greycoprops(b1_glcm, 'contrast')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'contrast')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'contrast')[0][0])
# dissimilarity
glcm_res.append(greycoprops(b1_glcm, 'dissimilarity')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'dissimilarity')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'dissimilarity')[0][0])
# homogeneity
glcm_res.append(greycoprops(b1_glcm, 'homogeneity')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'homogeneity')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'homogeneity')[0][0])
# energy
glcm_res.append(greycoprops(b1_glcm, 'energy')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'energy')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'energy')[0][0])
# correlation
glcm_res.append(greycoprops(b1_glcm, 'correlation')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'correlation')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'correlation')[0][0])
# ASM
glcm_res.append(greycoprops(b1_glcm, 'ASM')[0][0])
glcm_res.append(greycoprops(b2_glcm, 'ASM')[0][0])
glcm_res.append(greycoprops(b3_glcm, 'ASM')[0][0])
features = features.assign(glcm=[glcm_res])
return features
def extrair_caracteristicas(raster, mask, caracteristicas, params=None):
"""
Separa a imagem nas regiões definidas da máscara e executa a função de extração para cada região.
    Extração das características:
- hog: Histogram of Oriented Gradients
- media: media dos valores do pixel de cada banda
- dsv_p: desvio padrão dos valores do pixel de cada banda
        - ast: assimetria
        - var: variancia
        - ent: entropia
- crt: curtose
- glcm: Grey level Co-occurrence Matrix (contraste, dissimilaridade, homogeneidade, ASM, energia, correlação)
- lbp: Local Binary Pattern
Args:
raster (Union[gdal.Dataset, np.ndarray]): raster a sofrer a extração de características
mask (Union[gdal.Dataset, np.ndarray]): raster máscara que define as divisões das regiões
caracteristicas (list of str): lista de características a serem extraídas
params (dict): parametros para o algoritmo de extração
Returns:
(pd.DataFrame): DataFrame com todas as características extraídas de todas regiões
"""
# checando o preparando raster
raster_t = type(raster)
if raster_t is gdal.Dataset:
bandas = get_bandas(raster)
# readasarray converte todas 3 bandas
for i in range(len(bandas)):
bandas[i] = bandas[i].ReadAsArray()
np_raster = np.dstack(tuple(bandas))
# np_raster = gdal2nparray(bandas)
elif raster_t is np.ndarray:
np_raster = raster
else:
print('!Erro no tipo')
return None
# checando o preparando mask
mask_t = type(mask)
if mask_t is gdal.Dataset:
banda = get_bandas(mask)
np_mask = banda.ReadAsArray()
elif mask_t is np.ndarray:
np_mask = mask
else:
print('Erro no tipo')
return None
for c in caracteristicas:
        if c not in ['hog', 'media', 'dsv_p', 'ast', 'var', 'ent', 'crt', 'glcm', 'lbp']:
print('Erro: característica inválida')
return None
features = pd.DataFrame()
regioes = np.unique(np_mask)
regioes = np.delete(regioes, 0)
for reg in regioes:
rows, cols = np.where(np_mask == reg)
np_reg = np_raster[min(rows): max(rows) + 1, min(cols): max(cols) + 1, :].copy()
np_mask_reg = np_mask[min(rows): max(rows) + 1, min(cols): max(cols) + 1].copy()
r, c = np.where(np_mask_reg == reg)
np_mask_reg[r, c] = 0
np_reg = np.ma.masked_array(
np_reg,
np.dstack((np_mask_reg, np_mask_reg, np_mask_reg)),
fill_value=0
)
ftr_regiao = extrair_carac_regiao(np_reg, caracteristicas, params)
if ftr_regiao is None:
return None
ftr_regiao = ftr_regiao.assign(reg=int(reg))
features = features.append(ftr_regiao, ignore_index=True, sort=False)
return features
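# Usage sketch (hedged): extracting per-region features from a composite and its
# segmentation mask; the chosen feature list and LBP parameters are illustrative.
def _exemplo_extrair_caracteristicas(path_raster, path_mask):
    raster = gdal.Open(path_raster)
    mask = gdal.Open(path_mask)
    if raster is None or mask is None:
        print('Erro ao abrir raster ou máscara')
        return None
    return extrair_caracteristicas(raster, mask,
                                   caracteristicas=['media', 'dsv_p', 'lbp'],
                                   params={'P': 8, 'R': 1})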
def create_npmask(np_raster, no_data):
"""Função auxiliar que cria uma mascara para ignorar a região sem informação do raster original
Args:
np_raster (np.ndarray):
no_data (int): valor considerado como nenhum dado no raster
Returns:
(np.ndarray): mascara onde 0 será o valor da região ignorada e 1 o valor da região válida
"""
np_mask = np.ones(np_raster.shape, np.uint8)
coords = np.where(np_raster == no_data)
np_mask[coords] = 0
return np_mask
def create_fmask(shape, r, filtro):
""" Função auxiliar que cria a mascara para os filtros de passa alta e baixa ideal
Args:
shape (tuple): shape da imagem no domínio de fourier
r (int): raio de distância do centro da imagem onde será definido o filtro
filtro(int): tipo de filtro FT_FPB_IDEAL ou FT_FPA_IDEAL
Returns:
(np.ndarray): mascará de acordo com o filtro ideal a ser processado
"""
rows, cols = shape
cy, cx = rows // 2, cols // 2
y, x = np.ogrid[:rows, :cols]
reg = np.sqrt((y - cy) ** 2 + (x - cx) ** 2)
if not r:
r = min(cx, cy, cols - cx, rows - cy)
    if filtro == FT_FPB_IDEAL:
        # ideal low-pass: mark the frequencies beyond radius r for suppression
        reg_mask = reg > r
    else:
        # ideal high-pass: mark the frequencies within radius r for suppression
        reg_mask = reg <= r
return reg_mask
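# Sanity-check sketch (hedged): for the same radius the ideal low-pass and
# high-pass masks produced by create_fmask are exact complements of each other.
def _demo_create_fmask():
    mask_pb = create_fmask((64, 64), 10, FT_FPB_IDEAL)
    mask_pa = create_fmask((64, 64), 10, FT_FPA_IDEAL)
    assert np.array_equal(mask_pb, ~mask_pa)
    return mask_pb, mask_pa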
def realcar(np_raster, np_filtrada):
""" Função auxiliar que incrementa um imagem filtrada de passa alta a imagem original
Args:
np_raster (np.ndarray): imagem original
np_filtrada (np.ndarray): imagem com filtro de passa alta
Returns:
(np.ndarray): imagem realçada
"""
return np_raster + np_filtrada
def alto_reforco(np_raster, np_filtrada, k=1):
""" Função auxiliar que executa o processo de alto_reforço
Args:
np_raster (np.ndarray): imagem original
np_filtrada (np.ndarray): imagem filtrada
k (float): constante que multiplica mascara de nitidez
Returns:
(np.ndarray): imagem resultante do alto reforço
"""
mask_nitidez = np_raster - np_filtrada
return np_raster + k * mask_nitidez
def float2uint(np_raster, dtype):
""" Função auxiliar para converter imagens em float, resultantes de filtros, para uint8 ou uint16
Args:
np_raster (np.ndarray): imagem em float
dtype (np.dtype): dtype a ser convertido
Returns:
(np.ndarray): imagem em uint8 ou uint16
"""
if dtype == np.uint16:
return img_as_uint(np_raster)
elif dtype == np.uint8:
return img_as_ubyte(np_raster)
print('Datatype não reconhecido')
return None
def filtrar(raster, filtro, params, alto_ref=False, realce=False, path=None, nome=None):
""" Executa um filtro em um raster georreferenciado. O raster é convertido em uma imagem não georreferenciada,
onde é executado o filtro e depois é criado novamente em raster georreferenciado.
filtros disponíveis: correção gama, equalização do histograma, mediana, gaussiano, sobel, laplace,
passa baixa ideal (domínio da frequencia) e passa alta ideal (domínio da frequencia).
Possível utilizar alto reforço (high boost) nos filtros de mediana, gaussiano e passa baixa ideal (alto_ref=True).
Possível utilizar realce nos filtros de passa baixa (realce=True).
parametros disponíveis por filtro:
- FT_GAMA: 'gamma', 'gain'
- FT_MEDIANA: 'kernel'
- FT_GAUSS: 'sigma'
- FT_LAPLACE: 'lpc_oper'
- FT_FPB_IDEAL e FT_FPA_IDEAL: 'ft_raio'
Args:
raster(gdal.Dataset): Raster gdal a ser filtrado
filtro(int): Código do filtro disponível. Utilizar as constantes FT_...
params(dict): Dicionário com nome e valores dos parametros do filtro a ser processado
alto_ref(bool): Executar alto reforço com filtros de passa baixa
realce(bool): Executar realce com filtros de passa alta
path (str): diretório do arquivo raster resultante
nome (str): nome do arquivo raster resultante
Returns:
(gdal.Dataset): Raster gdal filtrado
"""
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
raster_count = raster.RasterCount
np_raster = gdal2nparray(raster)
if np_raster is None:
return None
if not params:
params = dict()
np_filtrada = None
if filtro == FT_GAMA:
extra = 'ft_gama'
# ToDo: confirmar se funciona em imagem composta diretamente
gamma = params['gamma'] if 'gamma' in params else 1
if gamma < 0:
print('Valor de gamma não pode ser negativo')
return None
gain = params['gain'] if 'gain' in params else 1
np_filtrada = adjust_gamma(np_raster, gamma, gain)
elif filtro == FT_EQLHIST:
extra = 'ft_equal_hist'
np_mask = create_npmask(np_raster, raster.GetRasterBand(1).GetNoDataValue())
if raster_count > 1:
aux = list()
for i in range(raster_count):
np_band = np_raster[..., i]
aux.append(equalize_hist(np_band, np_band.max(), np_mask[..., i]))
np_filtrada = np.dstack(aux)
else:
np_filtrada = equalize_hist(np_raster, np_raster.max(), np_mask)
np_filtrada = float2uint(np_filtrada, np_raster.dtype)
elif filtro == FT_MEDIANA:
extra = 'ft_mediana'
np_mask = create_npmask(np_raster, raster.GetRasterBand(1).GetNoDataValue())
shape = params['kernel'] if 'kernel' in params else (3, 3)
if type(shape) is not tuple:
print('tipo de dado do kernel inválido')
return None
else:
if len(shape) != 2:
print('Shape do kernel inválido. exemplo correto: (x, y)')
return None
selem = np.ones(shape)
aux = list()
if raster_count > 1:
for i in range(raster_count):
aux.append(median(np_raster[..., i], selem, mask=np_mask[..., i]))
np_filtrada = np.dstack(tuple(aux))
else:
np_filtrada = median(np_raster, selem, mask=np_mask)
if alto_ref:
extra += '_alto_ref'
np_filtrada = alto_reforco(np_raster, np_filtrada)
elif filtro == FT_GAUSS:
extra = 'ft_gauss'
if raster.RasterCount > 1:
multichannel = True
else:
multichannel = False
sigma = params['sigma'] if 'sigma' in params else 1
np_filtrada = gaussian(np_raster, sigma, multichannel=multichannel, preserve_range=True)
if alto_ref:
extra += '_alto_ref'
np_filtrada = alto_reforco(np_raster, np_filtrada)
elif filtro == FT_SOBEL:
extra = 'ft_sobel'
np_mask = create_npmask(np_raster, raster.GetRasterBand(1).GetNoDataValue())
if raster_count > 1:
aux = list()
for i in range(raster_count):
aux.append(sobel(np_raster[..., i], np_mask[..., i]))
np_filtrada = np.dstack(tuple(aux))
else:
np_filtrada = sobel(np_raster, np_mask)
np_filtrada = float2uint(np_filtrada, np_raster.dtype)
if realce:
extra += '_realce'
np_filtrada = realcar(np_raster, np_filtrada)
elif filtro == FT_LAPLACE:
extra = 'ft_laplace'
np_mask = create_npmask(np_raster, raster.GetRasterBand(1).GetNoDataValue())
ksize = params['lpc_oper'] if 'lpc_oper' in params else 3
if raster_count > 1:
aux = list()
for i in range(raster_count):
aux.append(laplace(np_raster[..., i], ksize, np_mask[..., i]))
np_filtrada = np.dstack(tuple(aux))
else:
np_filtrada = laplace(np_raster, ksize, np_mask)
lpc_min = np_filtrada.min()
if lpc_min < 0:
np_filtrada = np_filtrada - lpc_min
if realce:
extra += '_realce'
np_filtrada = realcar(np_raster, np_filtrada)
    elif filtro in (FT_FPB_IDEAL, FT_FPA_IDEAL):
extra = 'ft_fourier'
if filtro == FT_FPB_IDEAL:
extra += '_pb_ideal'
else:
extra += '_pa_ideal'
r = params['ft_raio'] if 'ft_raio' in params else None
mask_reg = create_fmask(np_raster[..., 0].shape, r, filtro)
rst_ft = np.fft.fft2(np_raster)
rst_ftst = fftshift(rst_ft)
rst_ftst[mask_reg] = 0
rst_r_ft = np.fft.ifftshift(rst_ftst)
np_filtrada = np.abs(np.fft.ifft2(rst_r_ft))
r, c, b = np.where(np_raster == 0)
np_filtrada[r, c, b] = 0
if realce:
extra += '_realce'
np_filtrada = realcar(np_raster, np_filtrada)
if alto_ref:
extra += '_alto_ref'
np_filtrada = alto_reforco(np_raster, np_filtrada)
else:
print('Erro: filtro não reconhecido')
return None
if np_filtrada is not None:
# converter em raster
col = raster.RasterXSize
row = raster.RasterYSize
geo_transf = raster.GetGeoTransform()
proj = raster.GetProjection()
dtype = raster.GetRasterBand(1).DataType
dest = cria_destino(path, nome, raster.GetDescription(), extra=extra)
driver = gdal.GetDriverByName(TIFF)
raster_filtrada = driver.Create(dest, col, row, raster_count, dtype, ['PHOTOMETRIC=RGB'])
# adicionando as informações geográficas
raster_filtrada.SetGeoTransform(geo_transf)
raster_filtrada.SetProjection(proj)
# escrevendo os dados das bandas no raster
if raster_count == 1:
raster_filtrada.GetRasterBand(1).WriteArray(np_filtrada)
else:
for i in range(raster_count):
raster_filtrada.GetRasterBand(i + 1).WriteArray(np_filtrada[..., i])
# atualizando as alterações no raster
raster_filtrada.FlushCache()
return raster_filtrada
print('Erro no filtro')
return None
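# Usage sketch (hedged): applying a Gaussian low-pass filter followed by
# high-boost sharpening; the sigma value is illustrative.
def _exemplo_filtrar_gauss(path_raster, path_saida=None):
    raster = gdal.Open(path_raster)
    if raster is None:
        print('Erro ao abrir o raster')
        return None
    return filtrar(raster, FT_GAUSS, params={'sigma': 2}, alto_ref=True,
                   path=path_saida, nome='gauss_alto_reforco')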
def series2array(df, cols_ignore):
"""
Args:
df(pd.DataFrame):
cols_ignore(Union[str, list]):
Returns:
"""
if type(cols_ignore) is str:
cols_ignore = [cols_ignore]
for col in df.columns:
if col not in cols_ignore:
df[col] = df[col].map(lambda x: np.array(x))
return df
def prepara_features(df, class_col='classe'):
"""
Args:
df (pd.DataFrame):
class_col (str):
Returns:
(list, pd.Series)
"""
target = df.pop(class_col)
data_flatted = [np.concatenate(x).ravel().tolist() for x in df.values]
return data_flatted, target.values
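# Sanity-check sketch (hedged): prepara_features flattens the list-valued
# feature columns of each row into a single vector and returns the labels
# separately; the column names below are illustrative.
def _demo_prepara_features():
    df = pd.DataFrame({
        'media': [[10.0, 20.0, 30.0], [11.0, 21.0, 31.0]],
        'dsv_p': [[1.0, 2.0, 3.0], [1.5, 2.5, 3.5]],
        'classe': [1, 2],
    })
    dados, alvo = prepara_features(df, class_col='classe')
    assert len(dados) == 2 and len(dados[0]) == 6
    assert list(alvo) == [1, 2]
    return dados, alvo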
def treinar_modelo(df_train, tipo_class, df_eval=None, class_col='classe', id_col=None, val_split=0.75,
classificador=None, params=None):
"""
Ajusta um modelo de classificação a partir de uma base de conhecimento, podendo ser uma Rede Neural Artificial (RNA)
-Multilayer Perceptron- ou Máquina de Vetores de Suporte (SVM).
    parâmetros possíveis de cada classificador (utilizando o dicionário params):
- RNA:
- hidden_units: lista com a quantidade de nós em cada layer. exemplo: [50, 50, 50], default: [10]
- learning rate: valor de aprendizado do algoritmo otimizador. default: 0.01
- steps: repetições do treinamento (backpropagation). Padrão 10000
- SVM:
        - kernel: tipo de kernel trick (linear, poly, rbf, sigmoid). default: linear
        - degree: graus do kernel trick polinomial (poly). default: 3
        - gamma: coeficiente do kernel (auto, scale)
        - coef0: termo independente dos kernel tricks sigmoid e poly
- tol: tolerancia para o critério de parada. default: 1e-3
Args:
df_train (pd.DataFrame): base de dados para treinamento
tipo_class (int): tipo de classificador (rh.RNA, rh.SVM)
df_eval (pd.DataFrame): base de dados para teste (Opcional)
class_col (str): nome da coluna com as classes
id_col (str): nome da coluna de identificação única de cada tupla de dado, se houver
val_split(float): valor utilizado para dividir a base de treinamento, caso não houver uma base para testes
classificador (Union[tf.estimator.DNNClassifier, svm.SVC]): classificador para treinar em uma nova base
params (dict): parametros para criação dos classificadores
    Returns:
        (tuple): (classificador, avaliacao) para SVM ou (classificador, avaliacao, lbs_dic_) para RNA,
            em que avaliacao é um dicionário de métricas calculadas sobre a base de avaliação
    """
# checando erros
if type(df_train) is not pd.DataFrame:
print('Erro: erro do tipo da DataFrame')
return None
accepted_params = ['learning_rate', 'hidden_units', 'steps', 'kernel', 'degree', 'gamma', 'coef0', 'tol']
if not params:
params = dict()
else:
for key in params:
if key not in accepted_params:
print('Parametro não reconhecido')
return None
# copiando dataframe de treinamento
df_train_c = df_train.copy()
# retirando coluna de id único caso tenha no dataframe
if id_col:
df_train_c.pop(id_col)
df_train_c = series2array(df_train_c, class_col)
if df_eval is not None:
df_eval_c = df_eval.copy()
if id_col:
df_eval_c.pop(id_col)
df_eval_c = series2array(df_eval_c, class_col)
else:
if 0 < val_split < 1:
all_data = df_train_c.sample(len(df_train_c)).reset_index(drop=True)
split = int(len(df_train_c) * val_split)
df_train_c = all_data.iloc[:split].reset_index(drop=True)
df_eval_c = all_data.iloc[split:].reset_index(drop=True)
else:
print('Sem dados para avaliar')
return None
data_train_flatted, target = prepara_features(df_train_c, class_col)
data_eval_flatted, target_ev = prepara_features(df_eval_c, class_col)
if tipo_class == RNA:
# salvando labels e quantidade
lbs = np.sort(np.unique(target))
n_classes = len(lbs)
# readequando nivel de labels para 0 -> n_classes - 1
lbs_map = list(enumerate(lbs))
lbs_dic = dict()
for lb in lbs_map:
lbs_dic[lb[1]] = lb[0]
lbs_dic_ = dict(zip(lbs_dic.values(), lbs_dic.keys()))
for i in range(len(target)):
target[i] = lbs_dic[target[i]]
for i in range(len(target_ev)):
target_ev[i] = lbs_dic[target_ev[i]]
# transformando em Tensor
ds_train = tf.data.Dataset.from_tensor_slices((data_train_flatted, target))
ds_train = ds_train.shuffle(buffer_size=len(df_train_c))
ds_eval = tf.data.Dataset.from_tensor_slices((data_eval_flatted, target_ev))
ds_eval = ds_eval.batch(1)
# salvando o shape das features e criando entrada da RNA
shape_ftr = tf.compat.v1.data.get_output_shapes(ds_train)
shape_ftr = (shape_ftr[0].num_elements(), shape_ftr[1].num_elements())
aux = len(df_train_c) // 10
batch_size = aux if aux > 0 else 1
ds_train = ds_train.batch(batch_size)
feature_col = [tf.feature_column.numeric_column(key='x', shape=shape_ftr)]
def train_input_fn():
def gen1(a, b):
return {'x': a}, b
ds = ds_train.map(gen1)
itr = tf.compat.v1.data.make_one_shot_iterator(ds)
data, labels = itr.get_next()
return data, labels
def predict_input_fn():
def gen1(a, b):
return {'x': a}, b
ds = ds_eval.map(gen1)
itr = tf.compat.v1.data.make_one_shot_iterator(ds)
data, _ = itr.get_next()
return data, None
if classificador is None:
learning_rate = params['learning_rate'] if 'learning_rate' in params else 0.01
classificador = tf.estimator.DNNClassifier(
hidden_units=params['hidden_units'] if 'hidden_units' in params else [10],
n_classes=n_classes,
feature_columns=feature_col,
optimizer=tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
)
if type(classificador) is tf.estimator.DNNClassifier:
steps = params['steps'] if 'steps' in params else 10000
classificador.train(train_input_fn, steps=steps)
target_pr = np.array([pr['class_ids'][0] for pr in classificador.predict(predict_input_fn)])
# retornando o valor original dos labels
for i in range(len(target_pr)):
target_pr[i] = lbs_dic_[target_pr[i]]
for i in range(len(target_ev)):
target_ev[i] = lbs_dic_[target_ev[i]]
else:
print('Erro: tipo de classificador não reconhecido')
return None
elif tipo_class == SVM:
if classificador is None:
classificador = svm.SVC(
kernel=params['kernel'] if 'kernel' in params else 'linear',
degree=params['degree'] if 'degree' in params else 3,
gamma=params['gamma'] if 'gamma' in params else 'auto',
coef0=params['coef0'] if 'coef0' in params else 0.0,
tol=params['tol'] if 'tol' in params else 1e-3
)
if type(classificador) is svm.SVC:
classificador.fit(data_train_flatted, target)
target_pr = classificador.predict(data_eval_flatted)
else:
print('Erro: tipo de classificador não reconhecido')
return None
else:
print('Erro: algoritmo de aprendizado não reconhecido')
return None
avaliacao = {
'accuracy': metrics.accuracy_score(target_ev, target_pr),
'balanced_accuracy': metrics.balanced_accuracy_score(target_ev, target_pr),
'precision_micro': metrics.precision_score(target_ev, target_pr, average='micro'),
'recall_micro': metrics.recall_score(target_ev, target_pr, average='micro'),
'f1_micro': metrics.f1_score(target_ev, target_pr, average='micro'),
'precision': metrics.precision_score(target_ev, target_pr, average='macro'),
'recall': metrics.recall_score(target_ev, target_pr, average='macro'),
'f1': metrics.f1_score(target_ev, target_pr, average='macro'),
'brier_score_loss': metrics.brier_score_loss(target_ev, target_pr),
'confusion_matrix': metrics.confusion_matrix(target_ev, target_pr)
}
if tipo_class == SVM:
return classificador, avaliacao
if tipo_class == RNA:
return classificador, avaliacao, lbs_dic_
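# Usage sketch (hedged): training an SVM from a feature DataFrame produced by
# extrair_caracteristicas with a user-added 'classe' column; the column names,
# split ratio and kernel are illustrative.
def _exemplo_treinar_svm(df_treino):
    resultado = treinar_modelo(df_treino, SVM, class_col='classe', id_col='reg',
                               val_split=0.75, params={'kernel': 'rbf'})
    if resultado is None:
        return None
    classificador, avaliacao = resultado
    print('accuracy:', avaliacao['accuracy'])
    return classificador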
def classificar(raster, mask, caracteristicas, classificador, path=None, nome=None, lbs_dict=None):
"""
Executa o processo de classificação do raster, em que cada pixel da região assumirá o valor da classe calculada
Necessário passar o classificador construído na função treinar_modelo
Args:
raster (gdal.Dataset): raster a ser extraída as características e classificada
mask (gdal.Dataset): raster máscara que define as regiões segmentadas
caracteristicas (list): lista com nome das características a serem extraídas
classificador (Union[tf.estimator.DNNClassifier, svm.SVC]):
path (str): diretório que será salvo o raster classificado
nome (str): nome do arquivo que será salvo o raster classificado
lbs_dict (dict): dicionário de conversão dos labels para uso do classificador RNA
    Returns:
        (gdal.Dataset): raster classificado, em que cada região recebe o código da classe prevista
    """
if path:
if not os.path.exists(path):
print('Diretório não existe')
return None
dest = cria_destino(path, nome, raster.GetDescription(), extra='class')
if type(classificador) is not tf.estimator.DNNClassifier and type(classificador) is not svm.SVC:
print('Erro do classificador')
return None
if not caracteristicas:
print('Necessário lista de características')
return None
# checando e preparando raster
raster_t = type(raster)
if raster_t is gdal.Dataset:
bandas = get_bandas(raster)
# readasarray converte todas 3 bandas
for i in range(len(bandas)):
bandas[i] = bandas[i].ReadAsArray()
np_raster = np.dstack(tuple(bandas))
# np_raster = gdal2nparray(bandas)
else:
print('Erro no tipo')
return None
# checando o preparando mask
mask_t = type(mask)
if mask_t is gdal.Dataset:
mask_b = get_bandas(mask)
np_mask = mask_b.ReadAsArray()
else:
print('Erro no tipo')
return None
features_ext = extrair_caracteristicas(np_raster, np_mask, caracteristicas)
if features_ext is None:
print('Erro na extração')
return None
features, reg = prepara_features(features_ext, 'reg')
if type(classificador) is tf.estimator.DNNClassifier:
if not lbs_dict:
print('Necessário dicionário de labels')
return None
elif not len(lbs_dict):
print('Dicionário de labels vazio')
return None
ds_eval = tf.data.Dataset.from_tensor_slices(features)
ds_eval = ds_eval.batch(1)
def predict_input_fn():
def gen1(a):
return {'x': a}
ds = ds_eval.map(gen1)
itr = tf.compat.v1.data.make_one_shot_iterator(ds)
data = itr.get_next()
return data, None
target_pr = np.array([pr['class_ids'][0] for pr in classificador.predict(predict_input_fn)])
# corrigindo ao label correto
for i in range(len(target_pr)):
target_pr[i] = lbs_dict[target_pr[i]]
elif type(classificador) is svm.SVC:
target_pr = classificador.predict(features)
col = mask.RasterXSize
row = mask.RasterYSize
geo_transf = mask.GetGeoTransform()
proj = mask.GetProjection()
driver = gdal.GetDriverByName(TIFF)
raster_class = driver.Create(dest, col, row, 1, gdal.GDT_Byte)
# adicionando as informações geográficas
raster_class.SetGeoTransform(geo_transf)
raster_class.SetProjection(proj)
np_class = np.copy(np_mask)
for i in range(len(reg)):
rows, cols = np.where(np_class == reg[i])
np_class[rows, cols] = target_pr[i]
# escrevendo os dados das bandas no raster
raster_class.GetRasterBand(1).WriteArray(np_class)
# atualizando as alterações no raster
raster_class.FlushCache()
return raster_class
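# Usage sketch (hedged): classifying a segmented raster with a previously
# trained SVM; the feature list must match the one used during training.
def _exemplo_classificar(path_raster, path_mask, classificador, path_saida=None):
    raster = gdal.Open(path_raster)
    mask = gdal.Open(path_mask)
    if raster is None or mask is None:
        print('Erro ao abrir raster ou máscara')
        return None
    return classificar(raster, mask, ['media', 'dsv_p', 'lbp'], classificador,
                       path=path_saida, nome='classificado')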
| 33.345477 | 160 | 0.630062 |
017f68615d497831e95bdffab1a50a53682a9407 | 8001 | py | Python | docs/conf.py | GuineaPiet/oauthlib | a8eff9c140eb45982c34c6555a93855851d09388 | ["BSD-3-Clause"] | null | null | null | docs/conf.py | GuineaPiet/oauthlib | a8eff9c140eb45982c34c6555a93855851d09388 | ["BSD-3-Clause"] | null | null | null | docs/conf.py | GuineaPiet/oauthlib | a8eff9c140eb45982c34c6555a93855851d09388 | ["BSD-3-Clause"] | 1 | 2021-10-03T14:53:40.000Z | 2021-10-03T14:53:40.000Z |
# -*- coding: utf-8 -*-
#
# OAuthLib documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 12 09:37:24 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OAuthLib'
copyright = u'2012, Idan Gazit and the Python Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from oauthlib import __version__ as v
version = v[:3]
# The full version, including alpha/beta/rc tags.
release = v
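# Illustrative note (not part of the original config): with the slicing above, an oauthlib
# __version__ string such as "3.2.2" (a hypothetical value) would yield version == "3.2"
# and release == "3.2.2"; the v[:3] slice assumes single-digit major and minor numbers.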
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'OAuthLibdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'OAuthLib.tex', u'OAuthLib Documentation',
u'Idan Gazit and the Python Community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'oauthlib', u'OAuthLib Documentation',
[u'Idan Gazit and the Python Community'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OAuthLib', u'OAuthLib Documentation',
u'Idan Gazit and the Python Community', 'OAuthLib', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
linkcheck_ignore = ["https://github.com/oauthlib/oauthlib/issues/new"]
| 32.392713
| 90
| 0.717285
|
a3799bb19587c6d7a36c074db17ce3adc3b2cbfa
| 221
|
py
|
Python
|
mathics/version.py
|
rjalif199/Mathics
|
be0f08be246284489fab84fcd507f4bb3a1ba098
|
[
"Apache-2.0"
] | null | null | null |
mathics/version.py
|
rjalif199/Mathics
|
be0f08be246284489fab84fcd507f4bb3a1ba098
|
[
"Apache-2.0"
] | null | null | null |
mathics/version.py
|
rjalif199/Mathics
|
be0f08be246284489fab84fcd507f4bb3a1ba098
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is suitable for sourcing inside POSIX shell as
# well as importing into Python. That's why there is no
# space around "=" below.
__version__="2.0.0dev" # noqa
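# Illustrative sketch (not part of the original file): because the assignment above has no
# spaces around "=", the very same line also parses as a POSIX shell assignment, e.g.
#   . mathics/version.py && echo "$__version__"
# while Python consumers simply import it (assuming the mathics package is importable):
#   from mathics.version import __version__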
| 24.555556
| 58
| 0.687783
|
4a1639c03c4272df2e3f1348fd446f04d01a8fa0
| 1,430
|
py
|
Python
|
examples/pylab_examples/pcolor_demo.py
|
SoftwareDev/mat-plot-lib
|
abaf94859d5ef6e653a4d8a7ce2c59cea1724a57
|
[
"MIT",
"BSD-3-Clause"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
lib/mpl_examples/pylab_examples/pcolor_demo.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 7
|
2015-05-08T19:36:25.000Z
|
2015-06-30T15:32:17.000Z
|
lib/mpl_examples/pylab_examples/pcolor_demo.py
|
yingkailiang/matplotlib
|
255a79b106c98c1904489afe6a754e4d943179d6
|
[
"MIT",
"BSD-3-Clause"
] | 6
|
2015-06-05T03:34:06.000Z
|
2022-01-25T09:07:10.000Z
|
"""
Demonstrates similarities between pcolor, pcolormesh, imshow and pcolorfast
for drawing quadrilateral grids.
"""
import matplotlib.pyplot as plt
import numpy as np
# make these smaller to increase the resolution
dx, dy = 0.15, 0.05
# generate 2 2d grids for the x & y bounds
y, x = np.mgrid[slice(-3, 3 + dy, dy),
slice(-3, 3 + dx, dx)]
z = (1 - x / 2. + x ** 5 + y ** 3) * np.exp(-x ** 2 - y ** 2)
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = -np.abs(z).max(), np.abs(z).max()
plt.subplot(2, 2, 1)
plt.pcolor(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolor')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()
plt.subplot(2, 2, 2)
plt.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolormesh')
# set the limits of the plot to the limits of the data
plt.axis([x.min(), x.max(), y.min(), y.max()])
plt.colorbar()
plt.subplot(2, 2, 3)
plt.imshow(z, cmap='RdBu', vmin=z_min, vmax=z_max,
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest', origin='lower')
plt.title('image (interp. nearest)')
plt.colorbar()
ax = plt.subplot(2, 2, 4)
ax.pcolorfast(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
plt.title('pcolorfast')
plt.colorbar()
plt.show()
| 25.087719
| 75
| 0.632168
|
16e3b061e6b3afed626aeddf839c78cf32674e44
| 9,396
|
py
|
Python
|
tests/get_genotype_all_vars_tests.py
|
hsiaoyi0504/cptac
|
296978a9a7ea0f298490984d8ffe10fb92b5ab34
|
[
"Apache-2.0"
] | 53
|
2019-05-30T02:05:04.000Z
|
2022-03-16T00:38:58.000Z
|
tests/get_genotype_all_vars_tests.py
|
hsiaoyi0504/cptac
|
296978a9a7ea0f298490984d8ffe10fb92b5ab34
|
[
"Apache-2.0"
] | 20
|
2020-02-16T23:50:43.000Z
|
2021-09-26T10:07:59.000Z
|
tests/get_genotype_all_vars_tests.py
|
hsiaoyi0504/cptac
|
296978a9a7ea0f298490984d8ffe10fb92b5ab34
|
[
"Apache-2.0"
] | 17
|
2019-09-27T20:55:09.000Z
|
2021-10-19T07:18:06.000Z
|
# Copyright 2018 Samuel Payne sam_payne@byu.edu
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import cptac
import cptac.utils as ut
def print_test_result(PASS):
"""Prints the result of a test, based on a bool.
Parameters:
PASS (bool): Whether or not the test passed.
"""
if PASS:
print('\tPASS')
else:
print('\tFAIL\n')
def check_returned_is_df(returned):
"""Checks that an object is a dataframe. Prints a specific message if it's actually None, or a general message if it's something else.
Parameters:
returned: The object to test
Returns:
bool: Indicates whether the object was a dataframe.
"""
if returned is None:
print("Function under test returned None.")
return False
if not isinstance(returned, pd.DataFrame):
print("Returned object was not a dataframe. Type of object: {}".format(type(returned)))
return False
return True
def check_df_shape(df, exp_shape):
"""Checks that a dataframe has the proper shape.
Parameters:
df (pandas.core.frame.DataFrame): The dataframe to test.
exp_shape (tuple): A tuple with two elements. First element is expected number of rows, second is expected number of columns.
Returns:
bool: Indicates whether the dataframe had the proper shape.
"""
act_shape = df.shape
if exp_shape != act_shape:
print("Dataframe dimensions did not match expected values.\n\tExpected: {}\n\tActual: {}\n".format(exp_shape, act_shape))
return False
return True
def check_getter(df, exp_dim, exp_headers, coordinates, values):
"""Test a dataframe's dimensions and headers, and three test values, then print whether it passed the test.
Parameters
df: the dataframe gotten by the getter we are testing
exp_dim: a tuple containing the expected dimensions of the dataframe, in the format (rows, columns)
exp_headers: if the dataframe has up to 20 columns, all of the headers for the dataframe, in order. If it has more than 20 columns, then a list containing the first ten and last ten headers, in order.
coordinates: a tuple with three elements, each element being a tuple with two elements, the first element being the int index of the row of a test value, and the second element being the int index of the column of a test value
values: a tuple with three elements, each element being the expected value of the test value corresponding to the coordinates at the same index in the coordinates parameter
Returns
bool indicating if the dataframe had the correct data.
"""
PASS = True
# Check that df is a dataframe, not None or something else.
if not check_returned_is_df(df):
return False # End test, because other tests will be useless.
# Check dimensions
if not check_df_shape(df, exp_dim):
PASS = False
# Check headers
act_headers_all = list(df.columns.values)
if len(df.columns.values) <= 20:
act_headers = act_headers_all
else:
act_headers = act_headers_all[:10] + act_headers_all[-10:]
if len(exp_headers) != len(act_headers):
print("Unexpected number of test headers in dataframe. Expected number of headers: {}. You passed {} headers.\n".format(len(act_headers), len(exp_headers)))
PASS = False
else:
for i, header in enumerate(exp_headers):
if header != act_headers[i]:
print("Dataframe header did not match expected value.\n\tExpected: {}\n\tActual: {}\n".format(header, act_headers[i]))
PASS = False
# Check test values
act_values = [
df.iloc[coordinates[0][0], coordinates[0][1]],
df.iloc[coordinates[1][0], coordinates[1][1]],
df.iloc[coordinates[2][0], coordinates[2][1]]]
for i, value in enumerate(values):
if act_values[i] != value:
print("Dataframe value did not match expected value.\n\tColumn: {}\n\tIndex: {}\n\tExpected: {}\n\tActual: {}\n".format(df.columns.values[coordinates[i][1]], df.index.values[coordinates[i][0]], value, act_values[i]))
PASS = False
# Return whether the dataframe passed the test
return PASS
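# Illustrative sketch (not part of the original test module): a minimal call to
# check_getter with a tiny hypothetical dataframe, just to show the expected argument
# shapes -- (rows, cols) dimensions, the header list, three (row, col) coordinates and
# the values expected at those coordinates.
_demo_df = pd.DataFrame({'KRAS': [0.1, 0.2, 0.3],
                         'Mutation': ['No_Mutation', 'Deletion', 'Amplification']})
_demo_pass = check_getter(
    _demo_df,
    (3, 2),
    ['KRAS', 'Mutation'],
    ((0, 1), (1, 1), (2, 1)),
    ('No_Mutation', 'Deletion', 'Amplification'))
# _demo_pass is True because the shape, headers and all three test values match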
def test_genotype_ccrcc_KRAS():
# test when there is no data in the somatic mutations df for a gene
print('Running test_genotype_ccrcc_KRAS...')
df = k.get_genotype_all_vars('KRAS')
dimensions = (110, 2)
headers = ['KRAS', 'Mutation']
# get index (int) of patient_ID
index_1 = df.index.get_loc('C3L-00010') # Test No_Mutation
index_2 = df.index.get_loc('C3L-01560')
index_3 = df.index.get_loc('C3N-00646')
index_4 = df.index.get_loc('C3L-00800') # No del vals (test more No_Mutation)
index_5 = df.index.get_loc('C3L-01281')
index_6 = df.index.get_loc('C3N-00154')
index_7 = df.index.get_loc('C3N-00492') # Test Amp
index_8 = df.index.get_loc('C3L-01287')
index_9 = df.index.get_loc('C3N-00852')
# Test No_Mutation
test_coord_1 = ((index_1, 1), (index_2, 1), (index_3, 1)) # C3N-01515
test_vals_1 = ('No_Mutation', 'No_Mutation', 'No_Mutation')
test_coord_2 = ((index_4, 1),(index_5, 1),(index_6, 1))
test_vals_2 = ('No_Mutation', 'No_Mutation', 'No_Mutation')
# Test Amp
test_coord_3 = ((index_7, 1), (index_8, 1), (index_9, 1))
test_vals_3 = ('Amplification', 'Amplification', 'Amplification')
test_coord_vals = [(test_coord_1, test_vals_1), (test_coord_2, test_vals_2),
(test_coord_3, test_vals_3)]
for coord, vals in test_coord_vals:
PASS = check_getter(df, dimensions, headers, coord, vals)
print_test_result(PASS)
def test_genotype_gbm_KRAS():
# test when there is no data in the somatic mutations df for a gene
print('Running test_genotype_gbm_KRAS...')
df = g.get_genotype_all_vars('KRAS')
dimensions = (98, 2)
headers = ['KRAS', 'Mutation']
# get index (int) of patient_ID
index_1 = df.index.get_loc('C3N-03473') # Test No_Mutation
index_2 = df.index.get_loc('C3N-03183')
index_3 = df.index.get_loc('C3N-01515')
index_4 = df.index.get_loc('C3L-01049') # Test Del (only 2)
index_5 = df.index.get_loc('C3L-02708')
index_6 = df.index.get_loc('C3N-02256')
index_7 = df.index.get_loc('C3N-01816') # Test Amp
index_8 = df.index.get_loc('C3N-02769')
index_9 = df.index.get_loc('C3N-02784')
# Test No_Mutation
test_coord_1 = ((index_1, 1), (index_2, 1), (index_3, 1)) # C3N-01515
test_vals_1 = ('No_Mutation', 'No_Mutation', 'No_Mutation')
# Test Del (only 2 del)
test_coord_2 = ((index_4, 1),(index_5, 1),(index_6, 1))
test_vals_2 = ('Deletion', 'Deletion', 'No_Mutation')
# Test Amp
test_coord_3 = ((index_7, 1), (index_8, 1), (index_9, 1))
test_vals_3 = ('Amplification', 'Amplification', 'Amplification')
test_coord_vals = [(test_coord_1, test_vals_1), (test_coord_2, test_vals_2),
(test_coord_3, test_vals_3)]
for coord, vals in test_coord_vals:
PASS = check_getter(df, dimensions, headers, coord, vals)
print_test_result(PASS)
def test_genotype_hnscc_KRAS():
# test when there is no data in the somatic mutations df for a gene
print('Running test_genotype_hnscc_KRAS...')
df = h.get_genotype_all_vars('KRAS')
dimensions = (109, 2)
headers = ['KRAS', 'Mutation']
# get index (int) of patient_ID
index_1 = df.index.get_loc('C3L-00999') # Test No_Mutation
index_2 = df.index.get_loc('C3N-01946')
index_3 = df.index.get_loc('C3N-03487')
index_4 = df.index.get_loc('C3N-01337') # Test Del
index_5 = df.index.get_loc('C3N-03012')
index_6 = df.index.get_loc('C3N-03785')
index_7 = df.index.get_loc('C3L-04844') # Test Amp
index_8 = df.index.get_loc('C3L-00987')
index_9 = df.index.get_loc('C3N-03488')
# Test No_Mutation
test_coord_1 = ((index_1, 1), (index_2, 1), (index_3, 1)) # C3N-01515
test_vals_1 = ('No_Mutation', 'No_Mutation', 'No_Mutation')
# Test Del
test_coord_2 = ((index_4, 1),(index_5, 1),(index_6, 1))
test_vals_2 = ('Deletion', 'Deletion', 'Deletion')
# Test Amp
test_coord_3 = ((index_7, 1), (index_8, 1), (index_9, 1))
test_vals_3 = ('Amplification', 'Amplification', 'Amplification')
test_coord_vals = [(test_coord_1, test_vals_1), (test_coord_2, test_vals_2),
(test_coord_3, test_vals_3)]
for coord, vals in test_coord_vals:
PASS = check_getter(df, dimensions, headers, coord, vals)
print_test_result(PASS)
k = cptac.Ccrcc()
g = cptac.Gbm()
h = cptac.Hnscc()
print("\nRunning tests:\n")
test_genotype_ccrcc_KRAS()
test_genotype_gbm_KRAS()
test_genotype_hnscc_KRAS()
print("Version:", cptac.version())
| 38.666667
| 230
| 0.666773
|
7571a54e662189dd244b4e49becb110fc5023519
| 19,793
|
py
|
Python
|
samcli/commands/build/build_context.py
|
awilkins/aws-sam-cli
|
2ace38995ef97120abdbe23939fb9e96c5eb76b1
|
[
"Apache-2.0"
] | null | null | null |
samcli/commands/build/build_context.py
|
awilkins/aws-sam-cli
|
2ace38995ef97120abdbe23939fb9e96c5eb76b1
|
[
"Apache-2.0"
] | 34
|
2020-12-08T21:15:26.000Z
|
2021-05-13T21:21:49.000Z
|
samcli/commands/build/build_context.py
|
awilkins/aws-sam-cli
|
2ace38995ef97120abdbe23939fb9e96c5eb76b1
|
[
"Apache-2.0"
] | 1
|
2022-02-09T01:25:20.000Z
|
2022-02-09T01:25:20.000Z
|
"""
Context object used by build command
"""
import logging
import os
import pathlib
import shutil
from typing import Dict, Optional, List
import click
from samcli.commands.build.exceptions import InvalidBuildDirException, MissingBuildMethodException
from samcli.lib.bootstrap.nested_stack.nested_stack_manager import NestedStackManager
from samcli.lib.build.build_graph import DEFAULT_DEPENDENCIES_DIR
from samcli.lib.intrinsic_resolver.intrinsics_symbol_table import IntrinsicsSymbolTable
from samcli.lib.providers.provider import ResourcesToBuildCollector, Stack, Function, LayerVersion
from samcli.lib.providers.sam_function_provider import SamFunctionProvider
from samcli.lib.providers.sam_layer_provider import SamLayerProvider
from samcli.lib.providers.sam_stack_provider import SamLocalStackProvider
from samcli.lib.utils.osutils import BUILD_DIR_PERMISSIONS
from samcli.local.docker.manager import ContainerManager
from samcli.local.lambdafn.exceptions import ResourceNotFound
from samcli.lib.build.exceptions import BuildInsideContainerError
from samcli.commands.exceptions import UserException
from samcli.lib.build.app_builder import (
ApplicationBuilder,
BuildError,
UnsupportedBuilderLibraryVersionError,
ContainerBuildNotSupported,
)
from samcli.commands._utils.options import DEFAULT_BUILD_DIR
from samcli.lib.build.workflow_config import UnsupportedRuntimeException
from samcli.local.lambdafn.exceptions import FunctionNotFound
from samcli.commands._utils.template import move_template
from samcli.lib.build.exceptions import InvalidBuildGraphException
LOG = logging.getLogger(__name__)
class BuildContext:
def __init__(
self,
resource_identifier: Optional[str],
template_file: str,
base_dir: Optional[str],
build_dir: str,
cache_dir: str,
cached: bool,
parallel: bool,
mode: Optional[str],
manifest_path: Optional[str] = None,
clean: bool = False,
use_container: bool = False,
# pylint: disable=fixme
# FIXME: parameter_overrides is never None, we should change this to "dict" from Optional[dict]
# See samcli/commands/_utils/options.py:251 for its all possible values
parameter_overrides: Optional[dict] = None,
docker_network: Optional[str] = None,
skip_pull_image: bool = False,
container_env_var: Optional[dict] = None,
container_env_var_file: Optional[str] = None,
build_images: Optional[dict] = None,
aws_region: Optional[str] = None,
create_auto_dependency_layer: bool = False,
stack_name: Optional[str] = None,
) -> None:
self._resource_identifier = resource_identifier
self._template_file = template_file
self._base_dir = base_dir
# Note(xinhol): use_raw_codeuri is temporary to fix a bug, and will be removed for a permanent solution.
self._use_raw_codeuri = bool(self._base_dir)
self._build_dir = build_dir
self._cache_dir = cache_dir
self._parallel = parallel
self._manifest_path = manifest_path
self._clean = clean
self._use_container = use_container
self._parameter_overrides = parameter_overrides
# Override certain CloudFormation pseudo-parameters based on values provided by customer
self._global_parameter_overrides: Optional[Dict] = None
if aws_region:
self._global_parameter_overrides = {IntrinsicsSymbolTable.AWS_REGION: aws_region}
self._docker_network = docker_network
self._skip_pull_image = skip_pull_image
self._mode = mode
self._cached = cached
self._container_env_var = container_env_var
self._container_env_var_file = container_env_var_file
self._build_images = build_images
self._create_auto_dependency_layer = create_auto_dependency_layer
self._stack_name = stack_name
self._function_provider: Optional[SamFunctionProvider] = None
self._layer_provider: Optional[SamLayerProvider] = None
self._container_manager: Optional[ContainerManager] = None
self._stacks: List[Stack] = []
def __enter__(self) -> "BuildContext":
self.set_up()
return self
def set_up(self) -> None:
"""Set up class members used for building
This should be called each time before run() if stacks are changed."""
self._stacks, remote_stack_full_paths = SamLocalStackProvider.get_stacks(
self._template_file,
parameter_overrides=self._parameter_overrides,
global_parameter_overrides=self._global_parameter_overrides,
)
if remote_stack_full_paths:
LOG.warning(
"Below nested stacks(s) specify non-local URL(s), which are unsupported:\n%s\n"
"Skipping building resources inside these nested stacks.",
"\n".join([f"- {full_path}" for full_path in remote_stack_full_paths]),
)
# Note(xinhol): self._use_raw_codeuri is added temporarily to fix issue #2717
# when base_dir is provided, codeuri should not be resolved based on template file path.
# we will refactor to make all path resolution inside providers instead of in multiple places
self._function_provider = SamFunctionProvider(self.stacks, self._use_raw_codeuri)
self._layer_provider = SamLayerProvider(self.stacks, self._use_raw_codeuri)
if not self._base_dir:
# Base directory, if not provided, is the directory containing the template
self._base_dir = str(pathlib.Path(self._template_file).resolve().parent)
self._build_dir = self._setup_build_dir(self._build_dir, self._clean)
if self._cached:
cache_path = pathlib.Path(self._cache_dir)
cache_path.mkdir(mode=BUILD_DIR_PERMISSIONS, parents=True, exist_ok=True)
self._cache_dir = str(cache_path.resolve())
dependencies_path = pathlib.Path(DEFAULT_DEPENDENCIES_DIR)
dependencies_path.mkdir(mode=BUILD_DIR_PERMISSIONS, parents=True, exist_ok=True)
if self._use_container:
self._container_manager = ContainerManager(
docker_network_id=self._docker_network, skip_pull_image=self._skip_pull_image
)
def __exit__(self, *args):
pass
def get_resources_to_build(self):
return self.resources_to_build
def run(self):
"""Runs the building process by creating an ApplicationBuilder."""
try:
builder = ApplicationBuilder(
self.get_resources_to_build(),
self.build_dir,
self.base_dir,
self.cache_dir,
self.cached,
self.is_building_specific_resource,
manifest_path_override=self.manifest_path_override,
container_manager=self.container_manager,
mode=self.mode,
parallel=self._parallel,
container_env_var=self._container_env_var,
container_env_var_file=self._container_env_var_file,
build_images=self._build_images,
combine_dependencies=not self._create_auto_dependency_layer,
)
except FunctionNotFound as ex:
raise UserException(str(ex), wrapped_from=ex.__class__.__name__) from ex
try:
build_result = builder.build()
artifacts = build_result.artifacts
stack_output_template_path_by_stack_path = {
stack.stack_path: stack.get_output_template_path(self.build_dir) for stack in self.stacks
}
for stack in self.stacks:
modified_template = builder.update_template(
stack,
artifacts,
stack_output_template_path_by_stack_path,
)
output_template_path = stack.get_output_template_path(self.build_dir)
if self._create_auto_dependency_layer:
LOG.debug("Auto creating dependency layer for each function resource into a nested stack")
nested_stack_manager = NestedStackManager(
self._stack_name, self.build_dir, stack.location, modified_template, build_result
)
modified_template = nested_stack_manager.generate_auto_dependency_layer_stack()
move_template(stack.location, output_template_path, modified_template)
click.secho("\nBuild Succeeded", fg="green")
# try to use relpath so the command is easier to understand; however,
# under Windows, when SAM and (build_dir or output_template_path) are
# on different drives, relpath() fails.
root_stack = SamLocalStackProvider.find_root_stack(self.stacks)
out_template_path = root_stack.get_output_template_path(self.build_dir)
try:
build_dir_in_success_message = os.path.relpath(self.build_dir)
output_template_path_in_success_message = os.path.relpath(out_template_path)
except ValueError:
LOG.debug("Failed to retrieve relpath - using the specified path as-is instead")
build_dir_in_success_message = self.build_dir
output_template_path_in_success_message = out_template_path
msg = self.gen_success_msg(
build_dir_in_success_message,
output_template_path_in_success_message,
os.path.abspath(self.build_dir) == os.path.abspath(DEFAULT_BUILD_DIR),
)
click.secho(msg, fg="yellow")
except (
UnsupportedRuntimeException,
BuildError,
BuildInsideContainerError,
UnsupportedBuilderLibraryVersionError,
ContainerBuildNotSupported,
InvalidBuildGraphException,
) as ex:
click.secho("\nBuild Failed", fg="red")
# Some Exceptions have a deeper wrapped exception that needs to be surfaced
# from deeper than just one level down.
deep_wrap = getattr(ex, "wrapped_from", None)
wrapped_from = deep_wrap if deep_wrap else ex.__class__.__name__
raise UserException(str(ex), wrapped_from=wrapped_from) from ex
@staticmethod
def gen_success_msg(artifacts_dir: str, output_template_path: str, is_default_build_dir: bool) -> str:
invoke_cmd = "sam local invoke"
if not is_default_build_dir:
invoke_cmd += " -t {}".format(output_template_path)
deploy_cmd = "sam deploy --guided"
if not is_default_build_dir:
deploy_cmd += " --template-file {}".format(output_template_path)
msg = """\nBuilt Artifacts : {artifacts_dir}
Built Template : {template}
Commands you can use next
=========================
[*] Invoke Function: {invokecmd}
[*] Test Function in the Cloud: sam sync --stack-name {{stack-name}} --watch
[*] Deploy: {deploycmd}
""".format(
invokecmd=invoke_cmd, deploycmd=deploy_cmd, artifacts_dir=artifacts_dir, template=output_template_path
)
return msg
@staticmethod
def _setup_build_dir(build_dir: str, clean: bool) -> str:
build_path = pathlib.Path(build_dir)
if os.path.abspath(str(build_path)) == os.path.abspath(str(pathlib.Path.cwd())):
exception_message = (
"Failing build: Running a build with build-dir as current working directory "
"is extremely dangerous since the build-dir contents is first removed. "
"This is no longer supported, please remove the '--build-dir' option from the command "
"to allow the build artifacts to be placed in the directory your template is in."
)
raise InvalidBuildDirException(exception_message)
if build_path.exists() and os.listdir(build_dir) and clean:
# build folder contains something inside. Clear everything.
shutil.rmtree(build_dir)
build_path.mkdir(mode=BUILD_DIR_PERMISSIONS, parents=True, exist_ok=True)
# ensure path resolving is done after creation: https://bugs.python.org/issue32434
return str(build_path.resolve())
@property
def container_manager(self) -> Optional[ContainerManager]:
return self._container_manager
@property
def function_provider(self) -> SamFunctionProvider:
# Note(xinhol): although self._function_provider is Optional,
# self._function_provider will be assigned with a non-None value in __enter__() and
# this function is only used in the context (after __enter__ is called)
# so we can assume it is not Optional here
return self._function_provider # type: ignore
@property
def layer_provider(self) -> SamLayerProvider:
# same as function_provider()
return self._layer_provider # type: ignore
@property
def build_dir(self) -> str:
return self._build_dir
@property
def base_dir(self) -> str:
# Note(xinhol): self._base_dir will be assigned with a str value if it is None in __enter__()
return self._base_dir # type: ignore
@property
def cache_dir(self) -> str:
return self._cache_dir
@property
def cached(self) -> bool:
return self._cached
@property
def use_container(self) -> bool:
return self._use_container
@property
def stacks(self) -> List[Stack]:
return self._stacks
@property
def manifest_path_override(self) -> Optional[str]:
if self._manifest_path:
return os.path.abspath(self._manifest_path)
return None
@property
def mode(self) -> Optional[str]:
return self._mode
@property
def resources_to_build(self) -> ResourcesToBuildCollector:
"""
Return the resources that should be built by the current build command. This function considers
Lambda functions and layers with a build method as buildable resources.
Returns
-------
ResourcesToBuildCollector
"""
return (
self.collect_build_resources(self._resource_identifier)
if self._resource_identifier
else self.collect_all_build_resources()
)
@property
def create_auto_dependency_layer(self) -> bool:
return self._create_auto_dependency_layer
def collect_build_resources(self, resource_identifier: str) -> ResourcesToBuildCollector:
"""Collect a single buildable resource and its dependencies.
For a Lambda function, its layers will be included.
Parameters
----------
resource_identifier : str
Resource identifier for the resource to be built
Returns
-------
ResourcesToBuildCollector
ResourcesToBuildCollector containing the buildable resource and its dependencies
Raises
------
ResourceNotFound
raised if the specified resource cannot be found.
"""
result = ResourcesToBuildCollector()
# Get the function and its layers. Skip it if it is inline.
self._collect_single_function_and_dependent_layers(resource_identifier, result)
self._collect_single_buildable_layer(resource_identifier, result)
if not result.functions and not result.layers:
# Collect all functions and layers that are not inline
all_resources = [f.name for f in self.function_provider.get_all() if not f.inlinecode]
all_resources.extend([l.name for l in self.layer_provider.get_all()])
available_resource_message = (
f"{resource_identifier} not found. Possible options in your " f"template: {all_resources}"
)
LOG.info(available_resource_message)
raise ResourceNotFound(f"Unable to find a function or layer with name '{resource_identifier}'")
return result
def collect_all_build_resources(self) -> ResourcesToBuildCollector:
"""Collect all buildable resources. Including Lambda functions and layers.
Returns
-------
ResourcesToBuildCollector
ResourcesToBuildCollector that contains all the buildable resources.
"""
result = ResourcesToBuildCollector()
result.add_functions([f for f in self.function_provider.get_all() if BuildContext._is_function_buildable(f)])
result.add_layers([l for l in self.layer_provider.get_all() if BuildContext._is_layer_buildable(l)])
return result
@property
def is_building_specific_resource(self) -> bool:
"""
Whether customer requested to build a specific resource alone in isolation,
by specifying function_identifier to the build command.
Ex: sam build MyServerlessFunction
:return: True if user requested to build specific resource, False otherwise
"""
return bool(self._resource_identifier)
def _collect_single_function_and_dependent_layers(
self, resource_identifier: str, resource_collector: ResourcesToBuildCollector
) -> None:
"""
Populate resource_collector with the function matching the provided identifier, along with
all of that function's layers that need to be built.
Parameters
----------
resource_collector: Collector that will be populated with resources.
"""
function = self.function_provider.get(resource_identifier)
if not function:
# No function found
return
resource_collector.add_function(function)
resource_collector.add_layers([l for l in function.layers if l.build_method is not None])
def _collect_single_buildable_layer(
self, resource_identifier: str, resource_collector: ResourcesToBuildCollector
) -> None:
"""
Populate resource_collector with the layer matching the provided identifier.
Parameters
----------
resource_collector
Returns
-------
"""
layer = self.layer_provider.get(resource_identifier)
if not layer:
# No layer found
return
if layer and layer.build_method is None:
LOG.error("Layer %s is missing BuildMethod Metadata.", self._function_provider)
raise MissingBuildMethodException(f"Build method missing in layer {resource_identifier}.")
resource_collector.add_layer(layer)
@staticmethod
def _is_function_buildable(function: Function):
# no need to build inline functions
if function.inlinecode:
LOG.debug("Skip building inline function: %s", function.full_path)
return False
# no need to build functions that are already packaged as a zip file
if isinstance(function.codeuri, str) and function.codeuri.endswith(".zip"):
LOG.debug("Skip building zip function: %s", function.full_path)
return False
return True
@staticmethod
def _is_layer_buildable(layer: LayerVersion):
# if build method is not specified, it is not buildable
if not layer.build_method:
LOG.debug("Skip building layer without a build method: %s", layer.full_path)
return False
# no need to build layers that are already packaged as a zip file
if isinstance(layer.codeuri, str) and layer.codeuri.endswith(".zip"):
LOG.debug("Skip building zip layer: %s", layer.full_path)
return False
return True
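# Illustrative sketch (not part of the original module): BuildContext is meant to be used
# as a context manager -- __enter__() calls set_up(), and run() drives ApplicationBuilder.
# A hedged usage outline; the argument values below are placeholders, not sam-cli defaults:
#
#   with BuildContext(
#       resource_identifier=None,            # None -> build every buildable resource
#       template_file="template.yaml",
#       base_dir=None,
#       build_dir=".aws-sam/build",
#       cache_dir=".aws-sam/cache",
#       cached=False,
#       parallel=False,
#       mode=None,
#   ) as build_context:
#       build_context.run()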
| 41.149688
| 117
| 0.672561
|
5edefe6916bdc070737c48468f9b0b920c78902e
| 19,333
|
py
|
Python
|
src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_06_01/v2019_06_01/operations/_express_route_circuit_connections_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_06_01/v2019_06_01/operations/_express_route_circuit_connections_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_06_01/v2019_06_01/operations/_express_route_circuit_connections_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitConnectionsOperations(object):
"""ExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-06-01"
self.config = config
def _delete_initial(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified Express Route Circuit Connection from the
specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def get(
self, resource_group_name, circuit_name, peering_name, connection_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified Express Route Circuit Connection from the specified
express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuitConnection or ClientRawResponse if
raw=true
:rtype:
~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnection
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def _create_or_update_initial(
self, resource_group_name, circuit_name, peering_name, connection_name, express_route_circuit_connection_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(express_route_circuit_connection_parameters, 'ExpressRouteCircuitConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, circuit_name, peering_name, connection_name, express_route_circuit_connection_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a Express Route Circuit Connection in the specified
express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the express route circuit
connection.
:type connection_name: str
:param express_route_circuit_connection_parameters: Parameters
supplied to the create or update express route circuit connection
operation.
:type express_route_circuit_connection_parameters:
~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitConnection or
ClientRawResponse<ExpressRouteCircuitConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnection]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
connection_name=connection_name,
express_route_circuit_connection_parameters=express_route_circuit_connection_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections/{connectionName}'}
def list(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
"""Gets all global reach connections associated with a private peering in
an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteCircuitConnection
:rtype:
~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnectionPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRouteCircuitConnection]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ExpressRouteCircuitConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/connections'}
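# Illustrative sketch (not part of the original generated module): create_or_update and
# delete return an LROPoller, so callers typically block on .result() until the ARM
# long-running operation completes. The operations object and argument values below are
# placeholders for this sketch only:
#
#   poller = express_route_circuit_connections.create_or_update(
#       resource_group_name="my-rg",
#       circuit_name="my-circuit",
#       peering_name="my-peering",
#       connection_name="my-connection",
#       express_route_circuit_connection_parameters=connection_model)
#   connection = poller.result()      # ExpressRouteCircuitConnection once provisioning ends
#
#   for conn in express_route_circuit_connections.list("my-rg", "my-circuit", "my-peering"):
#       print(conn.name)              # iterate the paged result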
| 48.944304
| 224
| 0.681012
|
afdb1ef1e316ad9e00798277f3f795fbcfc09471
| 2,740
|
py
|
Python
|
documents/aws-doc-sdk-examples/python/example_code/polly/GetLexicon.py
|
siagholami/aws-documentation
|
2d06ee9011f3192b2ff38c09f04e01f1ea9e0191
|
[
"CC-BY-4.0"
] | 5
|
2021-08-13T09:20:58.000Z
|
2021-12-16T22:13:54.000Z
|
documents/aws-doc-sdk-examples/python/example_code/polly/GetLexicon.py
|
siagholami/aws-documentation
|
2d06ee9011f3192b2ff38c09f04e01f1ea9e0191
|
[
"CC-BY-4.0"
] | null | null | null |
documents/aws-doc-sdk-examples/python/example_code/polly/GetLexicon.py
|
siagholami/aws-documentation
|
2d06ee9011f3192b2ff38c09f04e01f1ea9e0191
|
[
"CC-BY-4.0"
] | null | null | null |
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[GetLexicon.py demonstrates how to produce the content of a specific pronunciation lexicon stored in an AWS Region. ]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon Polly]
# snippet-keyword:[GetLexicon]
# snippet-keyword:[lexicon]
# snippet-service:[polly]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-31]
# snippet-sourceauthor:[ (AWS)]
# snippet-start:[polly.python.GetLexicon.complete]
from argparse import ArgumentParser
from os import path
from tempfile import gettempdir
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
# Define and parse the command line arguments
cli = ArgumentParser(description="GetLexicon example")
cli.add_argument("name", type=str, metavar="LEXICON_NAME")
arguments = cli.parse_args()
# Create a client using the credentials and region defined in the adminuser
# section of the AWS credentials and configuration files
session = Session(profile_name="adminuser")
polly = session.client("polly")
print(u"Fetching {0}...".format(arguments.name))
try:
# Fetch lexicon by name
response = polly.get_lexicon(Name=arguments.name)
except (BotoCoreError, ClientError) as error:
# The service returned an error, exit gracefully
cli.error(error)
# Get the lexicon data from the response
lexicon = response.get("Lexicon", {})
# Access the lexicon's content
if "Content" in lexicon:
output = path.join(gettempdir(), u"%s.pls" % arguments.name)
print(u"Saving to %s..." % output)
try:
# Save the lexicon contents to a local file
with open(output, "w") as pls_file:
pls_file.write(lexicon["Content"])
except IOError as error:
# Could not write to file, exit gracefully
cli.error(error)
else:
# The response didn't contain lexicon data, exit gracefully
cli.error("Could not fetch lexicons contents")
print("Done.")
# snippet-end:[polly.python.GetLexicon.complete]
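# Illustrative usage note (not part of the original sample): the script is run from a
# shell with the lexicon name as its single positional argument, e.g.
#   python GetLexicon.py MyLexicon
# which fetches the lexicon named "MyLexicon" (a placeholder name) using the "adminuser"
# profile and writes it to <tempdir>/MyLexicon.pls.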
| 35.128205
| 144
| 0.739051
|
3cc1235f63217878669ba939946c3001cee9b54e
| 14,403
|
py
|
Python
|
full-text-search/topic_scraper.py
|
skyline-ai/census-api
|
7529031043269585eb0ad0d8f34ea62082588c61
|
[
"MIT"
] | 1
|
2019-03-02T23:36:31.000Z
|
2019-03-02T23:36:31.000Z
|
full-text-search/topic_scraper.py
|
citizenlabsgr/2020-census
|
994a839b0181168ef1082b35058173532ead463e
|
[
"MIT"
] | null | null | null |
full-text-search/topic_scraper.py
|
citizenlabsgr/2020-census
|
994a839b0181168ef1082b35058173532ead463e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from HTMLParser import HTMLParser
import psycopg2
import re
import urllib2
class HTMLStripper(HTMLParser):
""" Stripper for HTML tags; simply stores data in self.data. """
def __init__(self):
self.reset()
self.data = []
def handle_data(self, data):
""" Append any non-HTML data to our data list.
Data, by definition, is anything that is not an HTML tag. This is
exactly what we are interested in.
"""
self.data.append(data)
def get_data(self):
""" Return data as a string. """
return ''.join(self.data)
class TopicsParser(HTMLParser):
""" Parser for the main topics page.
Attributes:
in_dt_tag: Flag for whether or not the parser is inside a <dt> tag.
topic_buffer: Buffer to store a single topic name and page URL
base_url: Census Reporter URL
topics: List of topic page dictionaries, each containing
'name' and 'url'
We always encounter, in order, a <dt> tag, an <a> tag, and the topic name.
We can take advantage of this data knowledge in order to build a buffer
that stores a URL, then a topic, which we can then append to our master
list of topics.
"""
def __init__(self):
HTMLParser.__init__(self)
self.in_dt_tag = False
self.topic_buffer = {'name': '', 'url': ''}
self.base_url = "https://censusreporter.org"
self.topics = []
def handle_starttag(self, tag, attrs):
""" Handle <dt> and <a> tags.
If we see a <dt> tag, set the flag appropriately. Census Reporter's
topic page does not nest these tags. If we see a link, build and store
the appropriate URL.
"""
if tag == 'dt':
self.in_dt_tag = True
if self.in_dt_tag and tag == 'a':
topic_url = self.base_url + attrs[0][1]
self.topic_buffer['url'] = topic_url
def handle_endtag(self, tag):
""" Handle </dt> tag by resetting flag. """
if tag == 'dt':
self.in_dt_tag = False
def handle_data(self, data):
""" Find data found in the <dt> tags, which are topic names.
Note that we use topic_buffer.copy() to prevent pointer-like
behavior, and create new dictionaries when appending them.
"""
if self.in_dt_tag:
self.topic_buffer['name'] = data
self.topics.append(self.topic_buffer.copy())
class TopicPageParser(HTMLParser):
""" Parser for an individual topic page.
Attributes:
in_body: Counter for whether or not parser is in main section of page.
This functions more or less like a stack, where we increment
it if we reach a relevant <section> tag, and decrement if we
reach a </section> tag. If it's greater than 0, then we are
in the main body of the page.
text: List to store all the relevant text snippets on the page
tables: Dictionary of table code : annotations pairs, where the table
code represents a table on the page and the annotations are
the annotations next to it
table_codes: List of all table codes.
The main page content is stored in a <section id='topic-overview'> tag
or a <section id='topic-elsewhere'> tag. We take advantage of this to find
the relevant information on the page (and ignore things like scripts or
footers).
"""
def __init__(self, html):
HTMLParser.__init__(self)
self.in_body = 0
self.text = []
self.tables = self.find_all_tables(html)
self.table_codes = self.tables.keys()
def handle_starttag(self, tag, attrs):
""" Handle start tag by detecting main section of page. """
if tag == 'section' and (('id', 'topic-overview') in attrs
or ('id', 'topic-elsewhere') in attrs):
self.in_body += 1
def handle_endtag(self, tag):
""" Handle end tag by detecting end of main section of page. """
if tag == 'section' and self.in_body:
self.in_body -= 1
def handle_data(self, data):
""" Add data to the text buffer. """
if self.in_body:
# Get rid of non-alphanumeric and non-space / dash / slash
# characters, plus newline characters to avoid concatenating lines.
# Then replace the newlines, slashes, and dashes with spaces.
# This is kind of crude, but ultimately all we care about is a
# long document of words.
data = re.sub('[^A-Za-z0-9\-/\n ]', '', data)
data = re.sub('[\n/-]', ' ', data)
self.text.append(data.strip())
def find_all_tables(self, text):
""" Find all table codes in text using regex
Table codes are formatted as [B/C]##### with an optional race iteration
(character A - H) or a Puerto Rico tag (string 'PR' at the end).
Occasionally, there are annotations on the topic pages following the
table code. These are one of the following characters:
‡ - collapsed version exists; 'collapsed'
† - has racial iterations; 'iterations'
§ - has Puerto Rico version; 'puerto_rico'
ª - no core table, only iterations; 'no_core'
"""
# Strip all the HTML tags
stripper = HTMLStripper()
stripper.feed(text.decode('utf-8'))
text = stripper.get_data()
# Find table codes
exp = '([BC]\d{5}[A-H]?P?R?)'
all_tables = re.finditer(exp, text)
# Prepare to find all tables on page
tables_on_page = {}
annotations = { u'‡' : 'collapsed', u'†' : 'iterations',
u'§' : 'puerto_rico', u'ª' : 'no_core' }
for match in all_tables:
code = match.group()
# Add code to tables_on_page if it's not there
if code not in tables_on_page.keys():
tables_on_page[code] = []
# Search for annotations in the four characters after the
# table code (since there are a maximum of four annotations)
end_pos = match.end()
potential_annotations = match.string[end_pos : end_pos + 4]
actual_annotations = []
for char in annotations.keys():
if char in potential_annotations:
actual_annotations.append(char)
# Update tables_on_page with the new annotations, no duplicates
tables_on_page[code] = list(set(tables_on_page[code]
+ actual_annotations))
return tables_on_page
class GlossaryParser(HTMLParser):
""" Parser for the glossary page, censusreporter.org/glossary.
Attributes:
in_body: Flag for whether or not parser is in main section of page.
We don't have to keep a counter as before, because the
glossary page is not structured in a way that there are
nested tags.
in_term_name: Flag for the whether or not the parser is inside a term
name. This allows it to keep a separate list of terms.
terms: List of terms on page.
text: List of text on page.
Once again, we use the fact that the body of the page is enclosed in a tag
<article id='glossary'>. Similarly, term names are always enclosed in <dt>
tags within the body. Upon encountering a term, it is added to the list of
terms. Upon encountering any text (including term names), it is added to
the list of all text.
"""
def __init__(self):
HTMLParser.__init__(self)
self.in_body = False
self.in_term_name = False
self.terms = []
self.text = []
def handle_starttag(self, tag, attrs):
""" Handle start tag by detecting body and term names.
We need to know when we're in the body of the page (again, to avoid
things like scripts or footers) and when we're in a term name (so that
those can be documented with higher priority).
"""
if tag == 'article' and ('id', 'glossary') in attrs:
self.in_body = True
if tag == 'dt':
self.in_term_name = True
def handle_endtag(self, tag):
""" Handle end tag by detecting end of body and term names. """
if tag == 'article' and self.in_body:
self.in_body = False
if tag == 'dt':
self.in_term_name = False
def handle_data(self, data):
""" Handle body text and term names on page.
Add term names found to the list of terms we maintain. Add all text
found to the list of text.
"""
if self.in_body:
data = re.sub('[^A-Za-z0-9\-/\n ]', '', data)
data = re.sub('[\n/-]', ' ', data)
self.text.append(data.strip())
if self.in_term_name:
self.terms.append(data)
def get_list_of_topics():
""" Gets and returns list of topics from Census Reporter website.
Topics are formatted as [{name: topic1, url: url1},
{name: topic2, url: url2}, ...]
"""
url = "https://censusreporter.org/topics"
handle = urllib2.urlopen(url)
html = handle.read()
handle.close()
parser = TopicsParser()
parser.feed(html)
return parser.topics
def scrape_topic_page(name, url):
""" Scrapes a single topic page to get description and list of tables. """
handle = urllib2.urlopen(url)
html = handle.read()
handle.close()
parser = TopicPageParser(html)
parser.feed(html)
text = ' '.join(parser.text)
return text, parser.tables, parser.table_codes
def scrape_glossary_page():
""" Scrapes and returns terms and text found on the glossary page. """
url = "https://censusreporter.org/glossary"
handle = urllib2.urlopen(url)
html = handle.read()
handle.close()
parser = GlossaryParser()
parser.feed(html)
return {'text': ' '.join(parser.text), 'terms': ' '.join(parser.terms) }
def remove_old_topics():
"""" Removes old topics entries from search_metadata. """
# Connect to database
connection = psycopg2.connect("dbname=census user=census")
cur = connection.cursor()
# Remove old entries
q = "DELETE FROM search_metadata WHERE type = 'topic';"
cur.execute(q)
print cur.statusmessage
connection.commit()
cur.close()
connection.close()
return
def add_topics_to_table(topics_data):
""" Adds topics data into the search_metadata table.
Requires that the format be a list of dictionaries, i.e.,
[{name: 'topic1', url: 'url1', table_codes: [tables_in_topic1],
text: '...', tables: {not relevant}},
{name: 'topic2', url: 'url2', table_codes: [tables_in_topic2],
text: '...', tables: {not relevant}},
... ]
"""
# Connect to database
connection = psycopg2.connect("dbname=census user=census")
cur = connection.cursor()
for topic in topics_data:
# Format each "text" entry properly, i.e., &-delimited. We replace spaces
# with &s, but trim whitespace because there may be multiple sequential
# spaces.
topic['text'] = re.sub('\s+', ' ', topic['text'].strip())
topic['text'] = topic['text'].replace(' ', ' & ')
# Update search_metadata accordingly. We set text1 to the topic name,
# text2 to the list of tables, text3 to the URL, and text4 through
# text6 to NULL. The document is made out of the title (first priority)
# and the words scraped (third priority)
q = """INSERT INTO search_metadata
(text1, text2, text3, text4, text5, text6,
type, document)
VALUES ('{0}', '{1}', '{2}', NULL, NULL, NULL, 'topic',
setweight(to_tsvector('{0}'), 'A') ||
setweight(to_tsvector('{3}'), 'C'));""".format(
topic['name'], ' '.join(topic['table_codes']), topic['url'], topic['text'])
cur.execute(q)
print cur.statusmessage
connection.commit()
cur.close()
connection.close()
return
def add_glossary_to_table(glossary):
""" Add glossary data to search_metadata table.
Requires that it be formatted as { terms: [term1, term2, ...], text: '...'}
"""
# Connect to database
connection = psycopg2.connect("dbname=census user=census")
cur = connection.cursor()
# Format text properly, i.e., &-delimited and without multiple spaces
glossary['text'] = re.sub('\s+', ' ', glossary['text'].strip())
glossary['text'] = glossary['text'].replace(' ', ' & ')
glossary['terms'] = re.sub('\s+', ' ', glossary['terms'].strip())
glossary['terms'] = glossary['terms'].replace(' ', ' & ')
# Update search_metadata. Set text1 to 'glossary', text2 to the terms,
# text3 to the URL, and text4 through text6 to NULL. Document is made out
# of the terms (first priority) and text (third priority)
q = """INSERT INTO search_metadata
(text1, text2, text3, text4, text5, text6, type, document)
VALUES ('Glossary', '{0}', 'https://censusreporter.org/glossary',
NULL, NULL, NULL, 'topic',
setweight(to_tsvector('{0}'), 'A') ||
setweight(to_tsvector('{1}'), 'C'));""".format(
glossary['terms'], glossary['text'])
cur.execute(q)
print cur.statusmessage
connection.commit()
cur.close()
connection.close()
return
if __name__ == "__main__":
topics = get_list_of_topics()
print "Obtained list of topics"
for topic in topics:
# Update topics dictionary with the text and tables that are
# scraped from the topic page.
topic['text'], topic['tables'], topic['table_codes'] = scrape_topic_page(**topic)
print "Finished scraping topic page '{0}'".format(topic['name'])
glossary = scrape_glossary_page()
print "Finished sraping glossary page"
remove_old_topics()
print "Removed old topics entries from search_metadata."
add_topics_to_table(topics)
add_glossary_to_table(glossary)
print "Added new topics entries to search_metadata."
| 33.573427
| 90
| 0.600014
|
ad7f27a5e16e890da61b18245a5d23c2a6cdec9a
| 5,890
|
py
|
Python
|
bin/projects.py
|
bdice/cibuildwheel
|
489d560fff0d17c67ade4f9b812c1b8263a61e87
|
[
"BSD-2-Clause"
] | null | null | null |
bin/projects.py
|
bdice/cibuildwheel
|
489d560fff0d17c67ade4f9b812c1b8263a61e87
|
[
"BSD-2-Clause"
] | null | null | null |
bin/projects.py
|
bdice/cibuildwheel
|
489d560fff0d17c67ade4f9b812c1b8263a61e87
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
Convert a yaml project list into a nice table.
Suggested usage:
./bin/projects.py docs/data/projects.yml --online --auth $GITHUB_API_TOKEN --readme README.md
git diff
"""
from __future__ import annotations
import builtins
import functools
import textwrap
import urllib.request
import xml.dom.minidom
from datetime import datetime
from io import StringIO
from pathlib import Path
from typing import Any, TextIO
import click
import yaml
from github import Github, GithubException
ICONS = (
"appveyor",
"github",
"azurepipelines",
"circleci",
"gitlab",
"travisci",
"windows",
"apple",
"linux",
)
class Project:
NAME: int = 0
def __init__(self, config: dict[str, Any], github: Github | None = None):
try:
self.name: str = config["name"]
self.gh: str = config["gh"]
except KeyError:
print("Invalid config, needs at least gh and name!", config)
raise
self.stars_repo: str = config.get("stars", self.gh)
self.notes: str = config.get("notes", "")
self.ci: list[str] = config.get("ci", [])
self.os: list[str] = config.get("os", [])
self.online = github is not None
if github is not None:
try:
repo = github.get_repo(self.stars_repo)
except GithubException:
print(f"Broken: {self.stars_repo}")
raise
self.num_stars: int = repo.stargazers_count
self.pushed_at = repo.pushed_at
if not self.notes:
notes = repo.description
if repo.description:
self.notes = notes
else:
self.num_stars = 0
self.pushed_at = datetime.utcnow()
name_len = len(self.name) + 4
self.__class__.NAME = max(self.__class__.NAME, name_len)
def __lt__(self, other: Project) -> bool:
if self.online:
return self.num_stars < other.num_stars
else:
return self.name < other.name
@classmethod
def header(cls) -> str:
return textwrap.dedent(
f"""\
| {'Name':{cls.NAME}} | CI | OS | Notes |
|{'':-^{cls.NAME+2 }}|----|----|:------|"""
)
@property
def namelink(self) -> str:
return f"[{self.name}][]"
@property
def starslink(self) -> str:
return f"![{self.name} stars][]"
@property
def url(self) -> str:
return f"https://github.com/{self.gh}"
@property
def ci_icons(self) -> str:
return " ".join(f"![{icon} icon][]" for icon in self.ci)
@property
def os_icons(self) -> str:
return " ".join(f"![{icon} icon][]" for icon in self.os)
def table_row(self) -> str:
notes = self.notes.replace("\n", " ")
return f"| {self.namelink: <{self.NAME}} | {self.ci_icons} | {self.os_icons} | {notes} |"
def links(self) -> str:
return f"[{self.name}]: {self.url}"
def info(self) -> str:
days = (datetime.utcnow() - self.pushed_at).days
return f"<!-- {self.name}: {self.num_stars}, last pushed {days} days ago -->"
def fetch_icon(icon_name: str) -> None:
url = f"https://cdn.jsdelivr.net/npm/simple-icons@v4/icons/{icon_name}.svg"
with urllib.request.urlopen(url) as f:
original_svg_data = f.read()
document = xml.dom.minidom.parseString(original_svg_data)
svgElement = document.documentElement
assert svgElement.nodeName == "svg"
svgElement.setAttribute("width", "16px")
svgElement.setAttribute("fill", "#606060")
icon_path = path_for_icon(icon_name)
icon_path.parent.mkdir(parents=True, exist_ok=True)
with open(path_for_icon(icon_name), "w") as f:
f.write(svgElement.toxml())
def path_for_icon(icon_name: str) -> Path:
return Path(".") / "docs" / "data" / "readme_icons" / f"{icon_name}.svg"
def str_projects(
config: list[dict[str, Any]],
*,
online: bool = True,
auth: str | None = None,
) -> str:
io = StringIO()
print = functools.partial(builtins.print, file=io)
if online:
for icon in ICONS:
fetch_icon(icon)
github = Github(auth) if online else None
projects = sorted((Project(item, github) for item in config), reverse=online)
print(Project.header())
for project in projects:
print(project.table_row())
print()
for project in projects:
print(project.links())
print()
for icon in ICONS:
print(f"[{icon} icon]: {path_for_icon(icon).as_posix()}")
print()
for project in projects:
print(project.info())
return io.getvalue()
@click.command(help="Try ./bin/projects.py docs/data/projects.yml --readme README.md")
@click.argument("input", type=click.File("r"))
@click.option("--online/--no-online", default=True, help="Get info from GitHub")
@click.option("--auth", help="GitHub authentication token")
@click.option("--readme", type=click.File("r+"), help="Modify a readme file if given")
def projects(
input: TextIO,
online: bool,
auth: str | None,
readme: TextIO | None,
) -> None:
config = yaml.safe_load(input)
output = str_projects(config, online=online, auth=auth)
if readme is None:
print(output)
else:
text = readme.read()
start_str = "<!-- START bin/projects.py -->\n"
start = text.find(start_str)
end = text.find("<!-- END bin/projects.py -->\n")
generated_note = f"<!-- this section is generated by bin/projects.py. Don't edit it directly, instead, edit {input.name} -->"
new_text = f"{text[:start + len(start_str)]}\n{generated_note}\n\n{output}\n{text[end:]}"
readme.seek(0)
readme.write(new_text)
readme.truncate()
if __name__ == "__main__":
projects()
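For a quick offline check of the table generation, the helpers above can be driven directly. The sketch below is a guess at minimal usage: the project entry is invented, the import assumes this file is importable as projects, and PyGithub, click and PyYAML must be installed for the module to import.
# Illustrative offline run of str_projects (sample entry is invented; no GitHub token needed).
from projects import str_projects
config = [
    {"name": "example-project", "gh": "someuser/example-project",
     "ci": ["github"], "os": ["linux"], "notes": "Sample entry"},
]
print(str_projects(config, online=False))   # skips icon downloads and GitHub lookups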
| 27.652582
| 133
| 0.594228
|
7aefa547bbb6fb5c93bf611211f3fbee74a0d004
| 275
|
py
|
Python
|
models/query_models/topic_model.py
|
RuiCoreSci/Flask-Restful
|
03f98a17487d407b69b853a9bf0ed20d2c5b003b
|
[
"MIT"
] | 7
|
2020-05-24T02:15:46.000Z
|
2020-11-26T07:14:44.000Z
|
models/query_models/topic_model.py
|
RuiCoreSci/Flask-Restful
|
03f98a17487d407b69b853a9bf0ed20d2c5b003b
|
[
"MIT"
] | 12
|
2020-05-17T10:46:29.000Z
|
2021-05-06T20:08:37.000Z
|
models/query_models/topic_model.py
|
RuiCoreSci/Flask-Restful
|
03f98a17487d407b69b853a9bf0ed20d2c5b003b
|
[
"MIT"
] | 4
|
2020-05-09T07:26:09.000Z
|
2021-10-31T07:09:10.000Z
|
from models.data_types import StringType
from models.query_models.base_model import BaseQueryModel, QueryField
class TopicQueryModel(BaseQueryModel):
name = QueryField(StringType(), location="json", comment="主题名")  # "主题名" means "topic name"
class RootTopicQueryModel(TopicQueryModel):
pass
| 25
| 69
| 0.803636
|
8e2a09f15c915a83255e5f89b22ed03fa180caf0
| 3,471
|
py
|
Python
|
sender.py
|
brandelli/t1-labredes
|
3611c32923591059566c847ce48a1c43fe1f2f12
|
[
"MIT"
] | null | null | null |
sender.py
|
brandelli/t1-labredes
|
3611c32923591059566c847ce48a1c43fe1f2f12
|
[
"MIT"
] | null | null | null |
sender.py
|
brandelli/t1-labredes
|
3611c32923591059566c847ce48a1c43fe1f2f12
|
[
"MIT"
] | null | null | null |
import struct
import socket
import binascii
import os
class Sender:
#0x0800 ipv4
#0x86dd ipv6
#17 UDP
#6 TCP
#def __init__(self, src, dest, data):
def __init__(self, fileName, etherType, ipProtocolType, destMac, srcMac):
self._file = self.prepareFile(fileName, 1480)
self._etherType = self.defineEtherType(etherType)
self._ipProtocolType = self.defineIpProtocol(ipProtocolType)
self._destMac = destMac
self._srcMac = srcMac
self._etherFrame = self.ethframe()
self.create()
#self._src = src
#self._dest = dest
#self._data = data
#self._packet = self.create()
@staticmethod
def src(packet):
offset = 6
mac = binascii.hexlify(struct.unpack('6s', packet[offset:offset+6])[0])
offset = 14+12
ip = socket.inet_ntoa(packet[offset:offset+4])
offset = 14+20
port = struct.unpack('!H', packet[offset:offset+2])[0]
addr = net.NetAddr(mac=mac, ip=ip, port=port)
return addr
@staticmethod
def dest(packet):
offset = 0
mac = binascii.hexlify(struct.unpack('6s', packet[offset:offset+6])[0])
offset = 14+16
ip = socket.inet_ntoa(packet[offset:offset+4])
offset = 14+22
port = struct.unpack('!H', packet[offset:offset+2])[0]
addr = net.NetAddr(mac=mac, ip=ip, port=port)
return addr
def prepareFile(self, strFileName, maxFileSize):
file = open(strFileName, 'r+b')
fileSize = os.stat(strFileName).st_size
offset = 0
arrFile = []
while (offset < fileSize):
file.seek(0,1)
fileChunk = file.read(maxFileSize)
offset+= maxFileSize
arrFile.append(fileChunk)
file.close()
return arrFile
def defineEtherType(self, etherType):
if(etherType == 'IPV4'):
return 0x0800
return 0x86DD
def defineIpProtocol(self, ipProtocolType):
if(ipProtocolType == 'UDP'):
return 17
return 6
def create(self):
#for file in self._file:
# print file
print self._etherType
print self._ipProtocolType
print self._etherFrame
def udpframe(self):
return struct.pack('HHHH',
socket.htons(self._src.port), # src port
socket.htons(self._dest.port), # dest port
socket.htons(8+len(self._data)), # length
0) # checksum
def ipv4frame(self, plen):
return struct.pack('BBHHHBBH4s4s',
69, # version, ihl
0, # dscp, ecn
socket.htons(20+plen), # length
0, # ident
0, # flags, fragment offset
255, # ttl
socket.IPPROTO_UDP, # protocol
0, # checksum
socket.inet_aton(self._src.ip), # src ip
socket.inet_aton(self._dest.ip)) # dest ip
def ethframe(self):
print self._destMac.encode()
print self._srcMac.encode()
return struct.pack('6s6sHB',
self._destMac, # dest
self._srcMac, # src
socket.htons(self._etherType), # etherType needs a 16-bit 'H' field; a single byte cannot hold 0x0800
0) # data
| 31.554545
| 79
| 0.528378
|
d72492bdea88c8dd51a27e7fa26e2fe5cb8f4737
| 1,544
|
py
|
Python
|
credentials_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
credentials_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
credentials_test.py
|
Bernard2030/passward-locker
|
4d654076197d58a78a9c782097c88b79c72a9653
|
[
"Unlicense"
] | null | null | null |
import unittest
from credentials import Credential
class TestOne(unittest.TestCase):
def setUp(self):
"""
Method to run before any other test case
"""
self.new_credential = Credential("user_name", "email", "passward" )
def tearDown(self):
"""
Method does clean-up after each test has run
"""
Credential.credential_list = []
def test__init__(self):
"""
test case to test if the object property is correctly initialized
"""
self.assertEqual(self.new_credential.use_name, "user_name")
self.assertEqual(self.new_credential.email, "email")
self.assertEqual(self.new_credential.passward, "passward")
def save_credential(self):
"""
test case to check if credential object is saved in the credential list
"""
self.new_credential.save_credential()
self.assertEqual(len(Credential.credential_list), 1)
def credential_available(self):
"""
test to check if we can return a boolean depending on availability of a credential
"""
self.new_credential.save_credential()
my_credential = Credential("magnet", "ben10", "Wx123")
my_credential.save_credential()
credential_exist = Credential.if_credential_available("magnet", "ben10", "Wx123")
self.assertTrue(credential_exist)
def display_credential(self):
"""
test to display saved credentials
"""
self.assertEqual(Credential.display_credential(), Credential.credential_list)
if __name__ == '__main__':
unittest.main()
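These tests exercise a Credential class that is defined elsewhere; the sketch below is only a minimal guess at the interface they assume (attribute spellings mirror the assertions above), not the project's actual credentials.py.
# Minimal interface sketch inferred from the tests above (not the real credentials.py).
class Credential:
    credential_list = []
    def __init__(self, use_name, email, passward):
        self.use_name = use_name
        self.email = email
        self.passward = passward
    def save_credential(self):
        Credential.credential_list.append(self)
    @classmethod
    def if_credential_available(cls, use_name, email, passward):
        return any(c.use_name == use_name for c in cls.credential_list)
    @classmethod
    def display_credential(cls):
        return cls.credential_list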
| 28.072727
| 86
| 0.686528
|
bb328396a857f011e34e6189ae18f2773e82efd0
| 6,632
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/alistipessp627.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/alistipessp627.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/alistipessp627.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Alistipes sp. 627.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:16:23.371669
The undirected graph Alistipes sp. 627 has 1637 nodes and 80553 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06016 and has 27 connected components, where the component with most
nodes has 1574 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 80, the mean node degree is 98.42, and
the node degree mode is 1. The top 5 most central nodes are 1501391.LG35_00030
(degree 642), 1501391.LG35_01275 (degree 554), 1501391.LG35_07715 (degree
520), 1501391.LG35_09505 (degree 502) and 1501391.LG35_08035 (degree 493).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import AlistipesSp627
# Then load the graph
graph = AlistipesSp627()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def AlistipesSp627(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Alistipes sp. 627 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Alistipes sp. 627 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 23:16:23.371669
The undirected graph Alistipes sp. 627 has 1637 nodes and 80553 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.06016 and has 27 connected components, where the component with most
nodes has 1574 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 80, the mean node degree is 98.42, and
the node degree mode is 1. The top 5 most central nodes are 1501391.LG35_00030
(degree 642), 1501391.LG35_01275 (degree 554), 1501391.LG35_07715 (degree
520), 1501391.LG35_09505 (degree 502) and 1501391.LG35_08035 (degree 493).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import AlistipesSp627
# Then load the graph
graph = AlistipesSp627()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="AlistipesSp627",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.089947
| 223
| 0.699789
|
aac09bf668d8df5951a4298172bfaced226eba31
| 3,041
|
py
|
Python
|
abel/abel.py
|
tourdeml/abel-pytorch
|
950ad6c0fd21f766c03b59efe52df3c17ad7fc21
|
[
"MIT"
] | null | null | null |
abel/abel.py
|
tourdeml/abel-pytorch
|
950ad6c0fd21f766c03b59efe52df3c17ad7fc21
|
[
"MIT"
] | null | null | null |
abel/abel.py
|
tourdeml/abel-pytorch
|
950ad6c0fd21f766c03b59efe52df3c17ad7fc21
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn, optim
from abel.utils import get_weight_norm
import warnings
class ABEL(optim.lr_scheduler._LRScheduler):
"""
Automatic, Bouncing into Equilibration Learning rate scheduler.
Args:
optimizer (torch.optim.Optimizer): torch based optimizer
decay (float): LR decay(default=0.1)
last_epoch (int): Last executed epoch(default=-1)
current_norm (torch.Tensor): current weight norm of model(default=None)
norm_t_1 (torch.Tensor): t-1 weight norm of model(default=None)
norm_t_2 (torch.Tensor): t-2 weight norm of model(default=None)
verbose (bool): Verbosity(default=False)
"""
def __init__(self, optimizer, decay: float=0.1, last_epoch: int=-1, current_norm: torch.Tensor=None, norm_t_1: torch.Tensor=None, norm_t_2: torch.Tensor=None, verbose: bool=False):
self.decay = decay
self.current_norm = current_norm
self.norm_t_1 = norm_t_1
self.norm_t_2 = norm_t_2
self.reached_min = False
self.decay_level = 1.
if current_norm is None:
self.current_norm = get_weight_norm(optimizer.param_groups)
self.norm_t_1 = norm_t_1
self.norm_t_2 = norm_t_2
super(ABEL, self).__init__(optimizer, last_epoch, verbose)
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if (self.last_epoch == 0):
return [group['lr'] for group in self.optimizer.param_groups]
return [group['lr'] * self.decay ** (self.decay_level)
for group in self.optimizer.param_groups]
def _get_closed_form_lr(self):
if self.last_epoch >= 2:
assert self.current_norm is not None
assert self.norm_t_1 is not None
assert self.norm_t_2 is not None
if (self.current_norm - self.norm_t_1) * (self.norm_t_1 - self.norm_t_2) < 0:
if self.reached_min:
self.reached_min = False
self.decay_level += 1
return [base_lr * self.decay ** (self.decay_level)
for base_lr in self.base_lrs]
else:
self.reached_min = True
return [base_lr * self.decay ** (self.decay_level)
for base_lr in self.base_lrs]
return [base_lr
for base_lr in self.base_lrs]
def step(self, epoch=None):
if self.last_epoch >= 2:
self.norm_t_2 = self.norm_t_1
self.norm_t_1 = self.current_norm
self.current_norm = get_weight_norm(self.optimizer.param_groups)
elif self.last_epoch == 1:
self.norm_t_1 = self.current_norm
self.current_norm = get_weight_norm(self.optimizer.param_groups)
super(ABEL, self).step(epoch)
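A minimal training-loop sketch for the scheduler defined above; the model, optimizer settings, and random data are placeholders, and it assumes a PyTorch version compatible with this package.
# Illustrative per-epoch use of ABEL (model, optimizer and data are placeholders).
if __name__ == "__main__":
    model = nn.Linear(10, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    scheduler = ABEL(optimizer, decay=0.1)
    for epoch in range(100):
        x, y = torch.randn(32, 10), torch.randn(32, 2)
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
        scheduler.step()   # updates the tracked weight norms and adjusts the learning rate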
| 38.493671
| 184
| 0.609997
|
4bb3c7edcd9b72b5decdeda7fa67bd102f0ecf0d
| 2,004
|
py
|
Python
|
steamstoreprice/steamstoreprice.py
|
Mirio/steamstoreprice
|
0372ed6c63e2b5cdb4b37bc5ae7518c7f9f4950b
|
[
"BSD-2-Clause"
] | null | null | null |
steamstoreprice/steamstoreprice.py
|
Mirio/steamstoreprice
|
0372ed6c63e2b5cdb4b37bc5ae7518c7f9f4950b
|
[
"BSD-2-Clause"
] | null | null | null |
steamstoreprice/steamstoreprice.py
|
Mirio/steamstoreprice
|
0372ed6c63e2b5cdb4b37bc5ae7518c7f9f4950b
|
[
"BSD-2-Clause"
] | null | null | null |
from steamstoreprice.exception import UrlNotSteam, PageNotFound, RequestGenericError
from bs4 import BeautifulSoup
import requests
class SteamStorePrice:
def normalizeurl(self, url):
"""
clean the url from referral and other tracking parameters
:param url(string): Steam store url
:return: string(url cleaned)
"""
if "://store.steampowered.com/app" in url:
return url
else:
raise UrlNotSteam("Please check the url, it doesn't contain store.steampowered.com/app*")
def normalizeprice(self, price):
"""
remove the currency from the price
:param price(string): price tag found on the Steam store
:return: float(price cleaned)
"""
listreplace = ["€", "$", "£", "\t", "\r\n"]
for replacestring in listreplace:
price = price.replace(replacestring, "")
return float(price.replace(",", "."))
def getpage(self, url):
"""
Get the page and raise if status_code is not equal to 200
:param url(string): normalized(url)
:return: bs4(html)
"""
url = self.normalizeurl(url)
req = requests.get(url)
if req.status_code == 200:
return BeautifulSoup(req.text, "html.parser")
elif req.status_code == 404:
raise PageNotFound("Page not found, please check url")
else:
raise RequestGenericError("Return Code: %s, please check url" % req.status_code)
def getprice(self, url):
"""
Find the price on the Steam store starting from URL
:param url(string): url
:return: float(price cleaned)
"""
body_content = self.getpage(self.normalizeurl(url))
try:
return self.normalizeprice(body_content.find("div", {"class": "game_purchase_price"}).contents[0])
except AttributeError:
return self.normalizeprice(body_content.find("div", {"class": "discount_final_price"}).contents[0])
| 30.363636
| 111
| 0.602794
|
375dc74c3bd654cdd3021d5bc3d3738de8013041
| 5,945
|
py
|
Python
|
qa/rpc-tests/prioritise_transaction.py
|
Ankh-Trust/electrum-core
|
360efcd3d6907e342608e3d2dc7628d6be639619
|
[
"MIT"
] | 2
|
2019-10-31T12:02:24.000Z
|
2019-11-09T11:08:03.000Z
|
qa/rpc-tests/prioritise_transaction.py
|
Ankh-Trust/electrum-core
|
360efcd3d6907e342608e3d2dc7628d6be639619
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/prioritise_transaction.py
|
Ankh-Trust/electrum-core
|
360efcd3d6907e342608e3d2dc7628d6be639619
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import ElectrumTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_SIZE
class PrioritiseTransactionTest(ElectrumTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.txouts = gen_return_txouts()
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
def run_test(self):
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined (lower
# the priority to ensure its not mined due to priority)
self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
print("Assert that prioritised transaction was mined")
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
print("Assert that de-prioritised transaction is still in mempool")
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
txid = self.nodes[0].sendrawtransaction(tx_hex)
# A tx that spends an in-mempool tx has 0 priority, so we can use it to
# test the effect of using prioritise transaction for mempool acceptance
inputs = []
inputs.append({"txid": txid, "vout": 0})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
try:
self.nodes[0].sendrawtransaction(tx2_hex)
except JSONRPCException as exp:
assert_equal(exp.error['code'], -26) # insufficient fee
assert(tx2_id not in self.nodes[0].getrawmempool())
else:
assert(False)
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000 byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
print("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
assert(tx2_id in self.nodes[0].getrawmempool())
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| 41.284722
| 128
| 0.643734
|
bfda1045087daad29ef57a69456567a403486160
| 3,026
|
py
|
Python
|
custom/hex_0x.py
|
jfcherng-sublime/ST-ColorHelper
|
b717bd82ec8325517ba1d8fd5ab0fc7937e56b38
|
[
"MIT"
] | null | null | null |
custom/hex_0x.py
|
jfcherng-sublime/ST-ColorHelper
|
b717bd82ec8325517ba1d8fd5ab0fc7937e56b38
|
[
"MIT"
] | null | null | null |
custom/hex_0x.py
|
jfcherng-sublime/ST-ColorHelper
|
b717bd82ec8325517ba1d8fd5ab0fc7937e56b38
|
[
"MIT"
] | null | null | null |
"""Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
from ..lib.coloraide import Color
from ..lib.coloraide.spaces.srgb.css import SRGB
from ..lib.coloraide.spaces import _parse
from ..lib.coloraide import util
import copy
import re
def norm_hex_channel(string):
"""Normalize the hex string to a form we can handle."""
if string.startswith('0x'):
return int(string[2:], 16) * _parse.RGB_CHANNEL_SCALE
else:
raise ValueError("String format of a hex channel must be in the form of '0xXX'")
class HexSRGB(SRGB):
"""SRGB that looks for alpha first in hex format."""
MATCH = re.compile(r"\b0x(?:[0-9a-fA-F]{8}|[0-9a-fA-F]{6})\b")
@classmethod
def match(cls, string, start=0, fullmatch=True):
"""Match a CSS color string."""
m = cls.MATCH.match(string, start)
if m is not None and (not fullmatch or m.end(0) == len(string)):
return cls.split_channels(m.group(0)), m.end(0)
return None, None
@classmethod
def translate_channel(cls, channel, value):
"""Translate channel string."""
# Unless it explicitly starts with `0x` we will assume it is a int/float.
if -1 <= channel <= 2:
return norm_hex_channel(value)
@classmethod
def split_channels(cls, color):
"""Split channels."""
return cls.null_adjust(
(
cls.translate_channel(0, '0x' + color[2:4]),
cls.translate_channel(1, '0x' + color[4:6]),
cls.translate_channel(2, '0x' + color[6:8])
),
cls.translate_channel(-1, '0x' + color[8:]) if len(color) > 8 else 1.0
)
def to_string(
self, parent, *, options=None, alpha=None, precision=None, fit=True, **kwargs
):
"""Convert to Hex format."""
options = kwargs
a = util.no_nan(self.alpha)
show_alpha = alpha is not False and (alpha is True or a < 1.0)
template = "0x{:02x}{:02x}{:02x}{:02x}" if show_alpha else "0x{:02x}{:02x}{:02x}"
if options.get("upper"):
template = template.upper()
method = None if not isinstance(fit, str) else fit
coords = util.no_nan(parent.fit(method=method).coords())
if show_alpha:
value = template.format(
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0)),
int(util.round_half_up(a * 255.0))
)
else:
value = template.format(
int(util.round_half_up(coords[0] * 255.0)),
int(util.round_half_up(coords[1] * 255.0)),
int(util.round_half_up(coords[2] * 255.0))
)
return value
class ColorHex(Color):
"""Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
CS_MAP = copy.copy(Color.CS_MAP)
CS_MAP["srgb"] = HexSRGB
| 33.622222
| 100
| 0.582287
|
207b6a519a3bd7326b4c350bc59c3e3644d275f0
| 1,835
|
py
|
Python
|
src/client_response.py
|
mtn/raft
|
60be5c7c32fa8d9331e74f9d5ff06deb5e4e5523
|
[
"MIT"
] | null | null | null |
src/client_response.py
|
mtn/raft
|
60be5c7c32fa8d9331e74f9d5ff06deb5e4e5523
|
[
"MIT"
] | 1
|
2021-06-01T22:21:44.000Z
|
2021-06-01T22:21:44.000Z
|
src/client_response.py
|
mtn/raft
|
60be5c7c32fa8d9331e74f9d5ff06deb5e4e5523
|
[
"MIT"
] | null | null | null |
"Responses that go the client (and thus have no dest field)"
from message import Message
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=missing-docstring
class ClientResponse(Message):
def __init__(self, src, req_id):
Message.__init__(self, src, None)
# The id that identifies the request itself
self.req_id = req_id
class SetResponse(ClientResponse):
"Response to client set request"
def __init__(self, src, req_id, key=None, value=None, error=None):
assert key is not None and value is not None or error is not None
ClientResponse.__init__(self, src, req_id)
self.key = key
self.val = value
self.err = error
def serialize(self):
serialized = {"source": self.src, "type": "getResponse", "id": self.req_id}
if self.val:
serialized["value"] = self.val
serialized["key"] = self.key
else:
serialized["error"] = self.err
return [serialized]
def __repr__(self):
return "SET({})".format(self.req_id)
class GetResponse(ClientResponse):
"Response to client get request"
def __init__(self, src, req_id, key=None, value=None, error=None):
assert key is not None and value is not None or error is not None
ClientResponse.__init__(self, src, req_id)
self.key = key
self.val = value
self.err = error
def serialize(self):
serialized = {"source": self.src, "type": "getResponse", "id": self.req_id}
if self.err:
serialized["error"] = self.err
else:
serialized["key"] = self.key
serialized["value"] = self.val
return [serialized]
def __repr__(self):
return "GET({})".format(self.req_id)
| 26.214286
| 83
| 0.618529
|
9083b51c2f7f113855a0b587233198c6c194ea9c
| 3,869
|
py
|
Python
|
vortexfitting/output.py
|
guilindner/VortexFitting
|
97081a7a27497e77e3244e47353a4281e5f83a77
|
[
"MIT"
] | 13
|
2018-06-19T17:34:09.000Z
|
2021-09-28T15:11:25.000Z
|
vortexfitting/output.py
|
ElsevierSoftwareX/SOFTX-D-20-00015
|
9a7d8d4c38114147dc42ddba90290eef0735b1e3
|
[
"MIT"
] | 4
|
2018-05-16T15:10:13.000Z
|
2021-08-06T07:09:01.000Z
|
vortexfitting/output.py
|
guilindner/VortexFitting
|
97081a7a27497e77e3244e47353a4281e5f83a77
|
[
"MIT"
] | 8
|
2017-07-23T10:33:32.000Z
|
2020-10-08T14:57:02.000Z
|
#!/usr/bin/env/ python3
"""
Create an output file for the detected vortices, with tecplot format
"""
import os
import numpy as np
def create(output_directory, args):
"""
Create an output file
:param output_directory: directory hosting the file vortices.dat
:type output_directory: str
:param args: parsed command-line arguments describing the detection settings
:type args: argparse.Namespace
:returns: file with time, radius, gamma, xcenter, ycenter, u_advection, v_advection, correlation, vtheta
:rtype: file
"""
if not os.path.exists(output_directory):
os.makedirs(output_directory)
outfile = open(output_directory + '/vortices.dat', 'w')
outfile.write("TITLE=\"Vortex characteristics evolution\"\n")
outfile.write("Variables=\"time\",\"radius\",\"gamma\",\"xcenter\",\"ycenter\","
"\"u_advection\",\"v_advection\",\"correlation\",\"vtheta\"\n")
outfile.write("DATASETAUXDATA Detection_method=\"{}\"\n".format(args.detection_method))
if args.scheme == 22:
outfile.write("DATASETAUXDATA Scheme=\"{}\"\n".format('least_square'))
else:
outfile.write("DATASETAUXDATA Scheme=\"{}\"\n".format(args.scheme))
outfile.write("DATASETAUXDATA Box_size=\"{}\"\n".format(args.box_size))
outfile.write("DATASETAUXDATA Detection_threshold=\"{}\"\n".format(args.detection_threshold))
outfile.write("DATASETAUXDATA Rmax=\"{}\"\n".format(args.rmax))
outfile.write("DATASETAUXDATA Correlation_threshold=\"{}\"\n".format(args.correlation_threshold))
outfile.write("DATASETAUXDATA Vortex_Model=\"{}\"\n".format('Lamb_Oseen'))
outfile.write("DATASETAUXDATA Mean_file=\"{}\"\n".format(args.mean_filename))
outfile.write("DATASETAUXDATA File_type=\"{}\"\n".format(args.file_type))
outfile.write("ZONE T=\"0\", SOLUTIONTIME=0\n")
outfile.close()
def write(vortices, output_directory, time_step):
"""
Update an output file
:param vortices: list of the detected vortices
:param output_directory: directory hosting the file vortices.dat
:param time_step: time of the current velocity field
:type vortices: list
:type output_directory: str
:type time_step: int
:returns: empty
:rtype: empty
"""
outfile = open(output_directory + '/vortices.dat', 'a')
for i, line in enumerate(vortices):
outfile.write("{0} {1} {2} {3} {4} {5} {6} {7} {8}\n".format(time_step, line[0], line[1], line[2], line[3],
line[4], line[5], line[7], line[8]))
outfile.close()
def write_field(output_file, detection_method, vfield, detection_field):
"""
Write a detection field file
:param output_file: directory hosting the file vortices.dat
:param detection_method: writes the selected detection method (Q, Delta, swirling strength)
:param vfield: full size velocity field
:param detection_field: full size detection field
:type output_file: str
:type detection_method: str
:type vfield: ndarray
:type detection_field: ndarray
:returns: file
:rtype: file
"""
output_parent = os.path.dirname(output_file)
if output_parent and not os.path.exists(output_parent):
    os.makedirs(output_parent)  # create the parent directory, not a directory named after the file
outfile = open(output_file, 'w')
outfile.write("TITLE=\"Detection field\"\n")
outfile.write("Variables=\"X\",\"Y\",\"{}\"\n".format(detection_method))
outfile.write(
"ZONE T=\"0\", I={:d}, J={:d}, SOLUTIONTIME=0\n".format(vfield.x_coordinate_size, vfield.y_coordinate_size))
for j in np.arange(0, vfield.y_coordinate_size, 1):
for i in np.arange(0, vfield.x_coordinate_size, 1):
outfile.write("{0} {1} {2}\n".format(str(vfield.x_coordinate_matrix[j]), str(vfield.y_coordinate_matrix[i]),
detection_field[i, j]))
outfile.write("{0} {1} {2}\n".format(0, 0, 0))
outfile.close()
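The writers above only need an args-like object carrying the attributes read in create(); the sketch below fakes one with SimpleNamespace, and every value in it is invented purely for illustration.
# Illustrative driver for create() and write() (all values are invented).
from types import SimpleNamespace
if __name__ == "__main__":
    fake_args = SimpleNamespace(detection_method="swirling", scheme=2, box_size=6,
                                detection_threshold=0.1, rmax=10,
                                correlation_threshold=0.75,
                                mean_filename="mean.dat", file_type="dns")
    create("results", fake_args)
    # one fake vortex row: radius, gamma, xcenter, ycenter, u_adv, v_adv, <index 6 unused>, corr, vtheta
    write([[1.2, 0.8, 10.0, 5.0, 0.1, 0.2, 0.0, 0.9, 0.3]], "results", time_step=0)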
| 41.159574
| 120
| 0.653657
|
2b4572b7945b1b36200d9a7c1f30df56949329e0
| 240
|
py
|
Python
|
resilient-sdk/tests/test_cmds/ext/test_ext_package.py
|
lmahoney1/resilient-python-api
|
ae7db374e6e79a03e555c3b9ff3c723c3314f673
|
[
"MIT"
] | null | null | null |
resilient-sdk/tests/test_cmds/ext/test_ext_package.py
|
lmahoney1/resilient-python-api
|
ae7db374e6e79a03e555c3b9ff3c723c3314f673
|
[
"MIT"
] | null | null | null |
resilient-sdk/tests/test_cmds/ext/test_ext_package.py
|
lmahoney1/resilient-python-api
|
ae7db374e6e79a03e555c3b9ff3c723c3314f673
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
from resilient_sdk.cmds import CmdExtPackage
def test_setup():
# TODO
pass
def test_execute_command():
# TODO
pass
| 16
| 58
| 0.6625
|
244deacaf6a0417098af21a014b9f0b0fe6be847
| 1,159
|
py
|
Python
|
ProjectApplication/project_core/migrations/0131_add_postal_address.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 5
|
2020-07-29T10:00:11.000Z
|
2022-02-19T11:00:34.000Z
|
ProjectApplication/project_core/migrations/0131_add_postal_address.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 471
|
2019-09-20T14:37:28.000Z
|
2022-03-25T14:16:34.000Z
|
ProjectApplication/project_core/migrations/0131_add_postal_address.py
|
code-review-doctor/project-application
|
d85b40b69572efbcda24ce9c40803f76d8ffd192
|
[
"MIT"
] | 5
|
2020-03-15T12:42:47.000Z
|
2022-02-15T18:06:52.000Z
|
# Generated by Django 3.0.8 on 2020-07-30 14:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project_core', '0130_postaladdress'),
]
operations = [
migrations.AddField(
model_name='historicalproposal',
name='postal_address',
field=models.ForeignKey(blank=True, db_constraint=False, help_text='Address to where the grant agreement is going to be sent', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='project_core.PostalAddress'),
),
migrations.AddField(
model_name='proposal',
name='postal_address',
field=models.ForeignKey(help_text='Address to where the grant agreement is going to be sent', null=True, on_delete=django.db.models.deletion.PROTECT, to='project_core.PostalAddress'),
),
migrations.AlterField(
model_name='postaladdress',
name='address',
field=models.TextField(help_text='Include street/avenue, block, building, floor, door, etc.'),
),
]
| 38.633333
| 249
| 0.660915
|
efa0781448a789879bb59777996a05c086258df6
| 872
|
py
|
Python
|
instagramHome/migrations/0005_likes.py
|
philip-bbaale/InstagramClone
|
9f2f219e167224585e6681f8aa00f1f5b0d3ecb9
|
[
"MIT"
] | 1
|
2020-06-06T09:35:53.000Z
|
2020-06-06T09:35:53.000Z
|
instagramHome/migrations/0005_likes.py
|
philip-bbaale/InstagramClone
|
9f2f219e167224585e6681f8aa00f1f5b0d3ecb9
|
[
"MIT"
] | 4
|
2021-06-08T21:40:44.000Z
|
2022-01-13T02:48:45.000Z
|
instagramHome/migrations/0005_likes.py
|
philip-bbaale/InstagramClone
|
9f2f219e167224585e6681f8aa00f1f5b0d3ecb9
|
[
"MIT"
] | 1
|
2020-06-03T15:08:04.000Z
|
2020-06-03T15:08:04.000Z
|
# Generated by Django 3.0.6 on 2020-06-01 05:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('instagramHome', '0004_delete_likes'),
]
operations = [
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('likes', models.IntegerField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagramHome.Post')),
],
),
]
| 33.538462
| 120
| 0.641055
|
9a67f6014d3e1581dd7ba0c56ed2a3ec123a0b4d
| 164
|
py
|
Python
|
vizdoomaze/envs/vizdoomazethree11.py
|
fanyuzeng/Vizdoomaze
|
5b444f2d861c908c4d96ae374bcce660d364f22e
|
[
"MIT"
] | 3
|
2020-09-25T16:00:49.000Z
|
2020-10-29T10:32:30.000Z
|
vizdoomaze/envs/vizdoomazethree11.py
|
fanyuzeng/Vizdoomaze
|
5b444f2d861c908c4d96ae374bcce660d364f22e
|
[
"MIT"
] | null | null | null |
vizdoomaze/envs/vizdoomazethree11.py
|
fanyuzeng/Vizdoomaze
|
5b444f2d861c908c4d96ae374bcce660d364f22e
|
[
"MIT"
] | 1
|
2021-12-17T07:50:47.000Z
|
2021-12-17T07:50:47.000Z
|
from vizdoomaze.envs.vizdoomenv import VizdoomEnv
class vizdoomazeThree11(VizdoomEnv):
def __init__(self):
super(vizdoomazeThree11, self).__init__(64)
| 27.333333
| 51
| 0.77439
|
b2832a1c020573eb0873fe13c5565de2e12b079b
| 5,721
|
py
|
Python
|
hail_scripts/elasticsearch/elasticsearch_client_v7.py
|
macarthur-lab/hail-elasticsearch-pipelines
|
7082681fd125e4f23a512aeff49853c5fc0f3136
|
[
"MIT"
] | 15
|
2017-11-22T14:48:04.000Z
|
2020-10-05T18:22:24.000Z
|
hail_scripts/elasticsearch/elasticsearch_client_v7.py
|
macarthur-lab/hail-elasticsearch-pipelines
|
7082681fd125e4f23a512aeff49853c5fc0f3136
|
[
"MIT"
] | 86
|
2017-12-14T23:45:29.000Z
|
2020-10-13T18:15:54.000Z
|
hail_scripts/elasticsearch/elasticsearch_client_v7.py
|
macarthur-lab/hail-elasticsearch-pipelines
|
7082681fd125e4f23a512aeff49853c5fc0f3136
|
[
"MIT"
] | 7
|
2019-01-29T09:08:10.000Z
|
2020-02-25T16:22:57.000Z
|
import datetime
import inspect
import logging
import time
from pprint import pformat
try:
import elasticsearch
except ImportError:
import os
os.system("pip install elasticsearch==7.9.1")
import elasticsearch
handlers = set(logging.root.handlers)
logging.root.handlers = list(handlers)
logger = logging.getLogger()
LOADING_NODES_NAME = 'elasticsearch-es-data-loading*'
class ElasticsearchClient:
def __init__(self, host='localhost', port='9200', es_username='pipeline', es_password=None):
"""Constructor.
Args:
host (str): Elasticsearch server host
port (str): Elasticsearch server port
es_username (str): Elasticsearch username
es_password (str): Elasticsearch password
"""
self._host = host
self._port = port
self._es_username = es_username
self._es_password = es_password
http_auth = (self._es_username, self._es_password) if self._es_password else None
self.es = elasticsearch.Elasticsearch(host, port=port, http_auth=http_auth)
# check connection
logger.info(pformat(self.es.info()))
def create_index(self, index_name, elasticsearch_schema, num_shards=1, _meta=None):
"""Calls es.indices.create to create an elasticsearch index with the appropriate mapping.
Args:
index_name (str): elasticsearch index mapping
elasticsearch_schema (dict): elasticsearch mapping "properties" dictionary
num_shards (int): how many shards the index will contain
_meta (dict): optional _meta info for this index
(see https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html)
"""
self.create_or_update_mapping(
index_name, elasticsearch_schema, num_shards=num_shards, _meta=_meta, create_only=True,
)
def create_or_update_mapping(self, index_name, elasticsearch_schema, num_shards=1, _meta=None, create_only=False):
"""Calls es.indices.create or es.indices.put_mapping to create or update an elasticsearch index mapping.
Args:
index_name (str): elasticsearch index mapping
elasticsearch_schema (dict): elasticsearch mapping "properties" dictionary
num_shards (int): how many shards the index will contain
_meta (dict): optional _meta info for this index
(see https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html)
create_only (bool): only allow index creation, throws an error if index already exists
"""
index_mapping = {
'properties': elasticsearch_schema,
}
if _meta:
logger.info('==> index _meta: ' + pformat(_meta))
index_mapping['_meta'] = _meta
if not self.es.indices.exists(index=index_name):
body = {
'mappings': index_mapping,
'settings': {
'number_of_shards': num_shards,
'number_of_replicas': 0,
'index.mapping.total_fields.limit': 10000,
'index.refresh_interval': -1,
'index.codec': 'best_compression', # halves disk usage, no difference in query times
}
}
logger.info('create_mapping - elasticsearch schema: \n' + pformat(elasticsearch_schema))
logger.info('==> creating elasticsearch index {}'.format(index_name))
self.es.indices.create(index=index_name, body=body)
else:
if create_only:
raise ValueError('Index {} already exists'.format(index_name))
logger.info('==> updating elasticsearch index {}. New schema:\n{}'.format(
index_name, pformat(elasticsearch_schema)))
self.es.indices.put_mapping(index=index_name, body=index_mapping)
def route_index_to_temp_es_cluster(self, index_name):
"""
Apply shard allocation filtering rules to route the given index to elasticsearch loading nodes
"""
self._update_settings(index_name, {
'index.routing.allocation.require._name': LOADING_NODES_NAME,
'index.routing.allocation.exclude._name': ''
})
def route_index_off_temp_es_cluster(self, index_name):
"""
Move any shards in the given index off of loading nodes
"""
self._update_settings(index_name, {
'index.routing.allocation.require._name': '',
'index.routing.allocation.exclude._name': LOADING_NODES_NAME
})
def _update_settings(self, index_name, body):
logger.info('==> Setting {} settings = {}'.format(index_name, body))
self.es.indices.put_settings(index=index_name, body=body)
def get_index_meta(self, index_name):
mappings = self.es.indices.get_mapping(index=index_name)
return mappings.get(index_name, {}).get('mappings', {}).get('_meta', {})
def wait_for_shard_transfer(self, index_name, num_attempts=1000):
"""
Wait for shards to move off of the loading nodes before connecting to seqr
"""
for i in range(num_attempts):
shards = self.es.cat.shards(index=index_name)
if LOADING_NODES_NAME not in shards:
logger.warning("Shards are on {}".format(shards))
return
logger.warning("Waiting for {} shards to transfer off the es-data-loading nodes: \n{}".format(
len(shards.strip().split("\n")), shards))
time.sleep(5)
raise Exception('Shards did not transfer off loading nodes')
| 38.655405
| 118
| 0.637476
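A minimal usage sketch of the ElasticsearchClient defined above. The host, index name, field mapping, and _meta values are illustrative assumptions rather than values from the pipeline; only methods defined in the class are used.
# Hypothetical index name and schema, for illustration only.
client = ElasticsearchClient(host="localhost", port="9200")
schema = {
    "variantId": {"type": "keyword"},
    "pos": {"type": "integer"},
}
client.create_index("example_variants", schema, num_shards=2, _meta={"sourceFile": "example.vcf"})
# Route the index to the temporary loading nodes while documents are bulk-indexed
# (the bulk indexing itself is outside this client), then move the shards back off.
client.route_index_to_temp_es_cluster("example_variants")
# ... bulk indexing happens here ...
client.route_index_off_temp_es_cluster("example_variants")
client.wait_for_shard_transfer("example_variants")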
|
d79af0cded454b6a691d5d96a160e7a90a9cef8b
| 4,377
|
py
|
Python
|
tests/unit/dataactvalidator/test_c5_award_financial_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_c5_award_financial_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_c5_award_financial_1.py
|
brianherman/data-act-broker-backend
|
80eb055b9d245046192f7ad4fd0be7d0e11d2dec
|
[
"CC0-1.0"
] | 1
|
2020-07-17T23:50:56.000Z
|
2020-07-17T23:50:56.000Z
|
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
from decimal import Decimal
_FILE = 'c5_award_financial_1'
def test_column_headers(database):
expected_subset = {'row_number', 'gross_outlay_amount_by_awa_cpe', 'gross_outlays_undelivered_cpe',
'gross_outlays_delivered_or_cpe', 'difference', 'uniqueid_TAS', 'uniqueid_PIID', 'uniqueid_FAIN',
'uniqueid_URI'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Test that calculation passes with equal values and with a null """
value_one = Decimal('101.23')
value_two = Decimal('102.34')
value_three = Decimal('103.45')
value_four = Decimal('104.56')
award_fin = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one - value_three + value_two - value_four,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=value_four)
award_fin_2 = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=-value_three + value_two - value_four,
gross_outlays_undelivered_cpe=None,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=value_four)
award_fin_3 = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one - value_three - value_four,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=None,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=value_four)
award_fin_4 = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one + value_two - value_four,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=None,
gross_outlays_delivered_or_fyb=value_four)
award_fin_5 = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one - value_three + value_two,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=None)
assert number_of_errors(_FILE, database, models=[award_fin, award_fin_2, award_fin_3, award_fin_4,
award_fin_5]) == 0
def test_failure(database):
""" Test that calculation fails for unequal values """
value_one = Decimal('101.23')
value_two = Decimal('102.34')
value_three = Decimal('103.45')
value_four = Decimal('104.56')
award_fin = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one - value_three + value_two - value_four,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=None)
award_fin_2 = AwardFinancialFactory(gross_outlay_amount_by_awa_cpe=value_one + value_two,
gross_outlays_undelivered_cpe=value_one,
gross_outlays_delivered_or_cpe=value_two,
gross_outlays_undelivered_fyb=value_three,
gross_outlays_delivered_or_fyb=value_four)
assert number_of_errors(_FILE, database, models=[award_fin, award_fin_2]) == 2
| 61.647887
| 120
| 0.604981
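All of the factory fixtures above exercise a single identity: gross_outlay_amount_by_awa_cpe must equal (gross_outlays_undelivered_cpe - gross_outlays_undelivered_fyb) + (gross_outlays_delivered_or_cpe - gross_outlays_delivered_or_fyb), with missing components treated as zero. A standalone restatement of that check (the helper below is illustrative and not part of the broker codebase):
from decimal import Decimal

def satisfies_c5(gross_cpe, undelivered_cpe, delivered_cpe, undelivered_fyb, delivered_fyb):
    # Treat missing (None) components as zero, as the null-handling fixtures above imply.
    zero = Decimal("0")
    expected = ((undelivered_cpe or zero) - (undelivered_fyb or zero)
                + (delivered_cpe or zero) - (delivered_fyb or zero))
    return gross_cpe == expected

# Mirrors award_fin from test_success (passes) and award_fin_2 from test_failure (fails).
assert satisfies_c5(Decimal("-4.44"), Decimal("101.23"), Decimal("102.34"), Decimal("103.45"), Decimal("104.56"))
assert not satisfies_c5(Decimal("203.57"), Decimal("101.23"), Decimal("102.34"), Decimal("103.45"), Decimal("104.56"))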
|
09ed6ea39e4d99c712993a8f39f9ce8f992c67fc
| 1,996
|
py
|
Python
|
util/loss_and_optim.py
|
Lornatang/tf-gans
|
8be24886ce42ba98dfd9429fd3c594a756bf46bf
|
[
"MIT"
] | 5
|
2020-02-13T23:19:26.000Z
|
2021-07-12T14:39:32.000Z
|
util/loss_and_optim.py
|
Lornatang/tf-dcgan
|
eedeb8d3ca8d038a89324773e2ab9f57f3977d59
|
[
"MIT"
] | 1
|
2021-03-29T03:42:19.000Z
|
2021-03-29T03:42:19.000Z
|
util/loss_and_optim.py
|
Lornatang/tf-dcgan
|
eedeb8d3ca8d038a89324773e2ab9f57f3977d59
|
[
"MIT"
] | 3
|
2019-08-18T07:23:16.000Z
|
2020-02-10T16:00:24.000Z
|
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
#
# Licensed under the MIT License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://opensource.org/licenses/MIT
# ==============================================================================
"""Generate optim loss and Discriminate optim loss"""
import tensorflow as tf
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def generator_loss(fake_output):
""" The generator's loss quantifies how well it was able to trick the discriminator.
Intuitively, if the generator is performing well, the discriminator will classify the fake images as real (or 1).
Here, we will compare the discriminator's decisions on the generated images to an array of 1s.
Args:
fake_output: generate pic.
Returns:
loss
"""
return cross_entropy(tf.ones_like(fake_output), fake_output)
def discriminator_loss(real_output, fake_output):
""" This method quantifies how well the discriminator is able to distinguish real images from fakes.
It compares the discriminator's predictions on real images to an array of 1s, and the discriminator's predictions
on fake (generated) images to an array of 0s.
Args:
real_output: origin pic.
fake_output: generate pic.
Returns:
real loss + fake loss
"""
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_optimizer():
""" The training generator optimizes the network.
Returns:
optim loss.
"""
return tf.keras.optimizers.Adam(lr=1e-4)
def discriminator_optimizer():
""" The training discriminator optimizes the network.
Returns:
optim loss.
"""
return tf.keras.optimizers.Adam(lr=1e-4)
| 28.514286
| 119
| 0.708417
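A short sketch of how the helpers above fit into one training step. The random tensors stand in for real discriminator outputs; the batch size and shapes are arbitrary.
import tensorflow as tf

# Stand-in discriminator logits for a batch of 4 samples.
real_output = tf.random.normal([4, 1])
fake_output = tf.random.normal([4, 1])

g_loss = generator_loss(fake_output)
d_loss = discriminator_loss(real_output, fake_output)

g_optimizer = generator_optimizer()
d_optimizer = discriminator_optimizer()

# In a full training loop these losses would be computed under tf.GradientTape
# and applied with the two optimizers above.
print(float(g_loss), float(d_loss))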
|
98af16983c48965399f2440878f50f2fbd285cd5
| 2,253
|
py
|
Python
|
perspective.py
|
Jessica001cheng/CarND-Advanced-Lane-Lines
|
23bb10899473507a5e1d2a876717e0e466579993
|
[
"MIT"
] | null | null | null |
perspective.py
|
Jessica001cheng/CarND-Advanced-Lane-Lines
|
23bb10899473507a5e1d2a876717e0e466579993
|
[
"MIT"
] | null | null | null |
perspective.py
|
Jessica001cheng/CarND-Advanced-Lane-Lines
|
23bb10899473507a5e1d2a876717e0e466579993
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
from helper import showImages, showSidebySide
from camera_calibrate import undistortImages
cameraCalibration = pickle.load( open('./pickled_data/camera_calibration.p', 'rb' ) )
mtx, dist = map(cameraCalibration.get, ('mtx', 'dist'))
## Read test image
testImages = list(map(lambda imageFileName: cv2.imread(imageFileName),
glob.glob('./test_images/st*.jpg')))
testImagesName = glob.glob('./test_images/st*.jpg')
print("test images num:", len(testImages))
index = 1
print("test images name:", testImagesName[index])
## Convert to RGB image
## test4.img to test
#testImage = cv2.imread('./test_images/test4.jpg')
original = cv2.cvtColor(testImages[index],cv2.COLOR_BGR2RGB)
undist = cv2.undistort(original, mtx, dist, None, mtx)
xSize, ySize, _ = undist.shape
copy = undist.copy()
bottomY = 720
topY = 500
left1 = (201, bottomY)
left1_x, left1_y = left1
left2 = (528, topY)
left2_x, left2_y = left2
right1 = (768, topY)
right1_x, right1_y = right1
right2 = (1100, bottomY)
right2_x, right2_y = right2
color = [255, 0, 0]
w = 2
cv2.line(copy, left1, left2, color, w)
cv2.line(copy, left2, right1, color, w)
cv2.line(copy, right1, right2, color, w)
cv2.line(copy, right2, left1, color, w)
showSidebySide(undist, copy, "original", "source_line_drawed")
gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
src = np.float32([
[left2_x, left2_y],
[right1_x, right1_y],
[right2_x, right2_y],
[left1_x, left1_y]
])
nX = gray.shape[1]
nY = gray.shape[0]
img_size = (nX, nY)
offset = 200
dst = np.float32([
[offset, 0],
[img_size[0]-offset, 0],
[img_size[0]-offset, img_size[1]],
[offset, img_size[1]]
])
img_size = (gray.shape[1], gray.shape[0])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(undist, M, img_size)
showSidebySide(undist, warped, "original", "Perspective_transformed")
#pickle.dump( { 'M': M, 'Minv': Minv }, open('./pickled_data/perspective_transform.p', 'wb'))
print(M)
print(Minv)
| 29.644737
| 93
| 0.658677
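Minv is computed above but only printed; a brief sketch (continuing the script above, so np, cv2, warped, img_size and showSidebySide are already in scope) of how it would typically be used to map bird's-eye coordinates, e.g. fitted lane-line pixels, back onto the undistorted camera image. The sample points are made up for illustration.
# Hypothetical points in the warped (bird's-eye) image, shaped (N, 1, 2) for OpenCV.
warped_pts = np.float32([[[300, 720]], [[310, 360]], [[320, 0]]])

# cv2.perspectiveTransform applies the 3x3 homography to 2D points;
# with Minv this maps bird's-eye coordinates back into the camera view.
unwarped_pts = cv2.perspectiveTransform(warped_pts, Minv)
print(unwarped_pts)

# The whole warped image can likewise be projected back for overlay visualisation.
unwarped = cv2.warpPerspective(warped, Minv, img_size)
showSidebySide(warped, unwarped, "birds_eye", "projected_back")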
|
40b14fccf59c974e6a989dbcd09bf348719103cb
| 5,765
|
py
|
Python
|
rvpy/gamma.py
|
TimothyKBook/distributions
|
301fd61df894d4b300176e287bf9e725378c38eb
|
[
"MIT"
] | 1
|
2018-06-27T17:22:56.000Z
|
2018-06-27T17:22:56.000Z
|
rvpy/gamma.py
|
TimothyKBook/distributions
|
301fd61df894d4b300176e287bf9e725378c38eb
|
[
"MIT"
] | null | null | null |
rvpy/gamma.py
|
TimothyKBook/distributions
|
301fd61df894d4b300176e287bf9e725378c38eb
|
[
"MIT"
] | 1
|
2018-06-12T13:38:25.000Z
|
2018-06-12T13:38:25.000Z
|
import numpy as np
from scipy.stats import gamma
from . import distribution, laplace, weibull
class Gamma(distribution.Distribution):
"""
Gamma Distribution using the following parameterization:
f(x | alpha, beta) = 1 / (beta**alpha * Gamma(alpha)) * x**(alpha - 1) * exp(-x / beta)
Parameters
----------
alpha : float, positive
Shape parameter
beta : float, positive
Scale parameter
Methods
-------
to_exponential()
Converts self to Exponential if alpha == 1
to_chisq()
Converts self to ChiSq if beta == 2
mgf(t)
Moment generating function
Relationships
-------------
Let X, Y be Gamma, c float. Then:
* X + Y is Gamma if betas match
* cX is Gamma
"""
def __init__(self, alpha, beta):
"""
Parameters
----------
alpha : float, positive
Shape parameter
beta : float, positive
Scale parameter
"""
assert alpha > 0 and beta > 0, "alpha and beta must be positive"
self.alpha = alpha
self.beta = beta
# Scipy backend
self.sp = gamma(a=alpha, scale=beta)
# Initialize super
super().__init__()
def __repr__(self):
return f"Gamma(alpha={self.alpha}, beta={self.beta})"
def __add__(self, other):
if isinstance(other, Gamma):
if self.beta != other.beta:
raise ValueError("Scale paramters of Gamma families must match")
else:
return Gamma(self.alpha + other.alpha, self.beta)
else:
raise TypeError("Only addition/subtraction of Gamma families supported")
def __sub__(self, other):
try:
other = other.to_exponential()
self.to_exponential()
except:
raise TypeError("Only subtraction of two Exponential random variables currently supported")
if other.scale == self.to_exponential().scale:
return laplace.Laplace(0, other.scale)
else:
raise TypeError("Difference of Exponentials must share scale parameter")
def __mul__(self, other):
if isinstance(other, (int, float)):
return Gamma(self.alpha, other*self.beta)
else:
raise TypeError("Only multiplication by scalar supported")
def __truediv__(self, other):
if isinstance(other, (int, float)) and other != 0:
return self.__mul__(1 / other)
else:
raise ZeroDivisionError("Cannot divide by zero!")
def mgf(self, t):
return np.where(t < 1/self.beta,
(1 - self.beta * t) ** (-self.alpha),
np.nan
)
def to_exponential(self):
assert self.alpha == 1, "Alpha must be 1 to downcast to Exponential"
return Exponential(self.beta)
def to_chisq(self):
assert self.beta == 2, "Beta must be 2 to downcast to ChiSq"
return ChiSq(2*self.alpha)
class Exponential(Gamma):
"""
Exponential Distribution using the following parameterization:
f(x | scale) = 1 / scale * exp(-x / scale)
Parameters
----------
scale : float, positive
Scale parameter
Methods
-------
to_gamma()
Converts self to Gamma
Relationships
-------------
Let X, Y be Exponential, c float. Then:
* X + Y is Gamma if scale parameters match
* X - Y is Laplace (not yet implemented)
* X**c is Weibull (not yet implemented)
* sqrt(X) is Rayleigh (not yet implemented)
* cX is Exponential
"""
def __init__(self, scale):
"""
Parameters
----------
scale : float, positive
Scale parameter
"""
# Get Gamma distribution initialization
super().__init__(1, scale)
# Parameters
self.scale = scale
self.rate = 1 / scale
def __repr__(self):
return f"Exponential(scale={self.scale})"
def __mul__(self, other):
if isinstance(other, (int, float)):
return Exponential(other*self.scale)
else:
raise TypeError("Only multiplication by scalar supported")
# TODO: def __sub__(self): -> Laplace
# TODO: def __pow__(self): -> Weibull
# TODO: sqrt --> Rayleigh
def to_gamma(self):
return Gamma(alpha=1, beta=self.scale)
# NOTE: .to_chisq() unnecessary since special case when scale == 2
# inherits from Gamma.
class ChiSq(Gamma):
"""
Chi Square Distribution using the following parameterization:
f(x | df) = 1 / (2**(df/2) * Gamma(k/2)) * x**(k/2 - 1) * exp(-x/2)
Parameters
----------
df : integer, positive
Degrees of freedom
Methods
-------
to_gamma()
Converts self to Gamma
to_exponential()
Converts self to Exponential when df == 2
Relationships
-------------
Let X, Y be Chi Squared. Then:
* X + Y is Chi Squared
"""
def __init__(self, df):
"""
Parameters
----------
df : integer, positive
Degrees of freedom
"""
assert isinstance(df, int), "Only integer degrees of freedom allowed."
# Get Gamma distribution initialization
super().__init__(alpha=df/2, beta=2)
# Parameters
self.df = df
def __repr__(self):
return f"ChiSq(df={self.df})"
def __add__(self, other):
if isinstance(other, ChiSq):
return ChiSq(self.df + other.df)
else:
return self.to_gamma() + other
def to_gamma(self):
return Gamma(alpha=self.df/2, beta=2)
# NOTE: .to_exponential() unnecessary since case when df == 2
# inherits from Gamma.
| 27.193396
| 103
| 0.569124
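A brief sketch of the relationships documented in the docstrings above, using arbitrary parameter values and assuming the rvpy package is importable as laid out above.
g1 = Gamma(alpha=2.0, beta=3.0)
g2 = Gamma(alpha=1.5, beta=3.0)

g_sum = g1 + g2        # Gamma(alpha=3.5, beta=3.0): addition requires matching betas
g_scaled = g1 * 2      # Gamma(alpha=2.0, beta=6.0): scaling by a constant rescales beta

e = Exponential(scale=3.0)
chi = ChiSq(df=4)

print(g_sum, g_scaled)
print(e.to_gamma())    # Gamma(alpha=1, beta=3.0)
print(chi.to_gamma())  # Gamma(alpha=2.0, beta=2)
print(g1.mgf(0.1))     # defined only for t < 1/beta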
|
86f723761740ead70b1f780ed4f7d2ca5667237c
| 2,341
|
py
|
Python
|
tree_intersection/test_tree_intesection.py
|
ravewillow6383/data-structures-and-algorithms-python
|
98533ee241a3ae452dab1ecb87aab39742005e35
|
[
"MIT"
] | null | null | null |
tree_intersection/test_tree_intesection.py
|
ravewillow6383/data-structures-and-algorithms-python
|
98533ee241a3ae452dab1ecb87aab39742005e35
|
[
"MIT"
] | null | null | null |
tree_intersection/test_tree_intesection.py
|
ravewillow6383/data-structures-and-algorithms-python
|
98533ee241a3ae452dab1ecb87aab39742005e35
|
[
"MIT"
] | null | null | null |
from tree_intersection import tree_intersection
from binary_tree import BinaryTree, Node
import pytest
@pytest.fixture()
def tree_one():
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six = Node(6)
seven = Node(7)
eight = Node(8)
nine = Node(9)
ten = Node (10)
eleven = Node(11)
twelve = Node(12)
thirteen = Node(13)
fourteen = Node(14)
fifteen = Node(15)
one.left_child = two
one.right_child = three
two.left_child = four
two.right_child = five
three.left_child = six
three.right_child = seven
four.left_child = eight
four.right_child = nine
five.left_child = ten
five.right_child = eleven
six.left_child = twelve
six.right_child = thirteen
seven.left_child = fourteen
seven.right_child = fifteen
fir = BinaryTree()
fir.root = one
return fir
@pytest.fixture()
def tree_two():
one = Node(13)
two = Node(15)
three = Node(20)
four = Node(21)
five = Node(22)
six = Node(23)
seven = Node(24)
eight = Node(25)
nine = Node(26)
ten = Node (28)
eleven = Node(29)
twelve = Node(12)
thirteen = Node(14)
fourteen = Node(1)
fifteen = Node(2)
one.left_child = two
one.right_child = three
two.left_child = four
two.right_child = five
three.left_child = six
three.right_child = seven
four.left_child = eight
four.right_child = nine
five.left_child = ten
five.right_child = eleven
six.left_child = twelve
six.right_child = thirteen
seven.left_child = fourteen
seven.right_child = fifteen
fir = BinaryTree()
fir.root = one
return fir
def test_exists():
assert tree_intersection
def test_tree_inter(tree_one, tree_two):
assert tree_intersection(tree_one, tree_two) == [2, 1, 12, 13, 14, 15]
def test_single_node():
one = Node(1)
two = Node(1)
tree = BinaryTree()
tree_one = BinaryTree()
tree.root = one
tree_one.root = two
assert tree_intersection(tree_one, tree) == [1]
def test_single_mismatch():
one = Node(1)
two = Node(2)
tree = BinaryTree()
tree_one = BinaryTree()
tree.root = one
tree_one.root = two
with pytest.raises(ValueError):
assert tree_intersection(tree_one, tree)
| 21.675926
| 74
| 0.628364
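The tree_intersection implementation itself is not included in this dump; below is a minimal sketch of one way to satisfy the behaviour the tests describe (collect the values the two trees share, raising ValueError when there are none). It assumes Node stores its payload in a .value attribute and uses breadth-first order, so the exact ordering of the returned list may differ from the fixture's expectation.
def tree_intersection_sketch(tree_a, tree_b):
    # Breadth-first collection of node payloads (assumes Node exposes .value,
    # plus the .left_child and .right_child attributes used in the fixtures above).
    def values(tree):
        out, queue = [], [tree.root]
        while queue:
            node = queue.pop(0)
            if node is not None:
                out.append(node.value)
                queue.extend([node.left_child, node.right_child])
        return out

    seen = set(values(tree_a))
    common = [v for v in values(tree_b) if v in seen]
    if not common:
        raise ValueError("the trees share no values")
    return common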
|
b463822025a9c63fbb6ab4a078582b5537fdd1f7
| 21,883
|
py
|
Python
|
calibrate.py
|
Algomorph/cvcalib
|
4ed638ea523b6d1059556a135576c7afa3a4b07f
|
[
"Apache-2.0"
] | 28
|
2016-07-15T09:03:57.000Z
|
2022-02-07T11:13:26.000Z
|
calibrate.py
|
Algomorph/calib_video_opencv
|
4ed638ea523b6d1059556a135576c7afa3a4b07f
|
[
"Apache-2.0"
] | 2
|
2017-04-19T18:20:07.000Z
|
2018-09-26T13:57:29.000Z
|
calibrate.py
|
Algomorph/calib_video_opencv
|
4ed638ea523b6d1059556a135576c7afa3a4b07f
|
[
"Apache-2.0"
] | 10
|
2016-12-31T00:07:12.000Z
|
2022-03-10T21:49:25.000Z
|
#!/usr/bin/python3
"""
@author: Gregory Kramida
@licence: Apache v2
Copyright 2016 Gregory Kramida
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os.path as osp
import argparse as ap
from enum import Enum
from common.args import required_length, string_arr
from yaml import load, dump
from calib.app_synced import ApplicationSynced
from calib.app_unsynced import ApplicationUnsynced
import re
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
class Argument(object):
def __init__(self, default,
nargs=1,
arg_type=str,
action='store',
arg_help="Documentation N/A",
console_only=False,
required=False,
shorthand=None):
"""
@rtype: Argument
Note: the argument's name is not a constructor parameter -- it is taken from the enclosing Setting member and used in both console and config file.
@type default: object
@param default: the default value
@type nargs: int | str
@param nargs: number of arguments. See python documentation for ArgumentParser.add_argument.
@type arg_type: type | str
@param arg_type: type of value to expect during parsing
@type action: str | function
@param action: action to perform with the argument value during parsing
@type arg_help: str
@param arg_help: documentation for this argument
@type console_only: bool
@param console_only: whether the argument is for console use only or for both config file & console
@type required: bool
@param required: whether the argument is required
@type shorthand: str
@param shorthand: shorthand to use for argument in console
"""
self.default = default
self.required = required
self.console_only = console_only
self.nargs = nargs
self.type = arg_type
self.action = action
self.help = arg_help
if shorthand is None:
self.shorthand = None
else:
self.shorthand = "-" + shorthand
# TODO: investigate enum inheritance. There is too much duplicate code between this script file and others, like
# sync_based_on_audio.py and multistereo.py
class Setting(Enum):
# ================= SETTING FILE STORAGE ==========================================================================#
settings_file = Argument(None, '?', str, 'store',
"File (absolute or relative-to-execution path) where to save and/or " +
"load settings for the program in YAML format.",
console_only=True, required=False)
save_settings = Argument(False, '?', 'bool_flag', 'store_true',
"Save (or update) setting file.",
console_only=True, required=False)
# ================= WORK FOLDER, INPUT & OUTPUT FILES =============================================================#
folder = Argument("./", '?', str, 'store',
"Path to root folder to work in. If set to '!settings_file_location' and a " +
" settings file is provided, will be set to the location of the settings file.",
console_only=False, required=False)
videos = Argument(["left.mp4", "right.mp4"], '+', string_arr, required_length(1, 10),
"Input videos. May be multiple videos for unsynced mode, a stereo video tuple (left, right), " +
"or a single video file, specified relative to the work folder (see 'folder' argument).",
console_only=False, required=False)
input_calibration = Argument(None, '+', string_arr, required_length(1, 10),
"Existing calibration file[s] to initialize calibration parameters. " +
"Optional for synced mode, mandatory for unsynced mode.",
console_only=False, required=False)
output = Argument(None, '?', str, 'store',
"Output file to store calibration results (relative to work folder, see 'folder' setting)",
console_only=False, required=False)
filtered_image_folder = Argument("frames", '?', str, 'store',
"Filtered frames will be saved into this folder (relative to work folder " +
"specified in 'folder'). Synced mode only.",
console_only=False, required=False, shorthand="if")
aux_data_file = Argument("aux.npz", '?', str, 'store',
"File (relative to 'folder') where to load from and/or save to inner corner positions, " +
"calibration time ranges, frame numbers, and other auxiliary data.",
console_only=False, required=False, shorthand="df")
# ============== STORAGE CONTROL FLAGS ============================================================================#
# calibration intervals:
save_calibration_intervals = Argument(False, '?', 'bool_flag', 'store_true',
"Save the calculated time bounds of calibration period within the video for" +
" future re-use.",
console_only=False, required=False)
load_calibration_intervals = Argument(False, '?', 'bool_flag', 'store_true',
"Load the previously-determined time bounds of calibration period within " +
"video (avoids potentially-long computation that seeks out the calibration " +
"in the video)",
console_only=False, required=False)
# frame data
save_frame_data = Argument(False, '?', 'bool_flag', 'store_true',
"Save (or update) the gathered locations of inner board corners and other frame data.",
console_only=False, required=False)
load_frame_data = Argument(False, '?', 'bool_flag', 'store_true',
"Load the previously-gathered locations of inner board corners and other frame data " +
"(skips gathering frame data).",
console_only=False, required=False)
# output calibration
skip_saving_output = Argument(False, '?', 'bool_flag', 'store_true',
"Skip saving the output file. Usually, you don't want to skip that.",
console_only=False, required=False)
# cherry-picked frame images
save_images = Argument(False, '?', 'bool_flag', 'store_true',
"Save images picked out for calibration. Synced mode only.",
console_only=False, required=False)
save_checkerboard_overlays = Argument(default=False, arg_type='bool_flag', action='store_true',
arg_help="Save checkerboard overlays alongside the actual raw frame " +
"images picked out. Only works when `save_images` is enabled.")
load_images = Argument(False, '?', 'bool_flag', 'store_true',
"Load images previously picked out for calibration (skips frame gathering). Synced only.",
console_only=False, required=False)
# TODO: enable saving rvec & tvec of camera pose obtained during calibration
# ============== CALIBRATION PREVIEW ==============================================================================#
preview = Argument(False, '?', 'bool_flag', 'store_true',
"Save (or update) setting file.",
console_only=False, required=False)
preview_files = Argument(["left.png", "right.png"], '+', string_arr, required_length(1, 10),
"Test calibration result on left/right frame pair (currently only for stereo in synced " +
"mode).", console_only=False, required=False)
# ============== BOARD DIMENSIONS =================================================================================#
board_width = Argument(9, '?', int, 'store',
"Checkerboard horizontal inner corner count (width in squares - 1).",
console_only=False, required=False)
board_height = Argument(6, '?', int, 'store',
"Checkerboard vertical inner corner count (height in squares - 1).",
console_only=False, required=False)
board_square_size = Argument(0.0198888, '?', float, 'store',
"Checkerboard square size, in meters.",
console_only=False, required=False)
# ============== FRAME FILTERING CONTROLS ======================================================#
sharpness_threshold = Argument(55.0, '?', float, 'store',
"Sharpness threshold based on variance of " +
"Laplacian; used to filter out frames that are too blurry. Synced mode only.",
console_only=False, required=False, shorthand="fs")
difference_threshold = Argument(.4, '?', float, 'store',
"Per-pixel difference (in range [0,1.0]) between current and previous frames to "
+ "filter out frames that are too much alike. Synced mode only.",
console_only=False, required=False, shorthand="fd")
manual_filter = Argument(False, '?', 'bool_flag', 'store_true',
"Pick which (pre-filtered)frames to use manually" +
"one-by-one (use 'a' key to approve). Synced mode only.",
console_only=False, required=False, shorthand="fm")
frame_count_target = Argument(-1, '?', int, 'store',
"Total number of frames (from either camera) to target for calibration usage." +
"Synced mode only.",
console_only=False, required=False, shorthand="ft")
frame_number_filter = Argument(False, '?', 'bool_flag', 'store_true',
"Use only frame numbers specified in the auxiliary data file.",
console_only=False, required=False, shorthand="fn")
time_range_hint = Argument(None, 2, int, 'store',
"Look at frames only within this time range (in seconds) when seeking exact periods of" +
"calibration in all videos. A good hint will decrease the search time, but any frames " +
"outside the range hint will not be used. Unsynced mode only.",
console_only=False, required=False)
# ============== CALIBRATION & DISTORTION MODEL CONTROLS ==========================================================#
max_iterations = Argument(100, '?', int, 'store',
"Maximum number of iterations for the stereo for calibration (optimization) loop.",
console_only=False, required=False, shorthand="ci")
precalibrate_solo = Argument(False, '?', 'bool_flag', 'store_true',
"calibrate each camera individually (in case of stereo calibration) first, then " +
"perform stereo calibration.",
console_only=False, required=False, shorthand="cs")
stereo_only = Argument(False, '?', 'bool_flag', 'store_true',
"Use in conjunction with the input_calibration option. " +
"Does nothing for single-camera calibration. Synced mode only.",
console_only=False, required=False, shorthand="cso")
use_rational_model = Argument(False, '?', 'bool_flag', 'store_true',
"Use the newer OpenCV rational model (8 distortion coefficients w/ tangential " +
"ones, 6 without) as opposed to the old 3+2 polynomial coefficient model.",
console_only=False, required=False, shorthand="cr")
use_tangential_coeffs = Argument(False, '?', 'bool_flag', 'store_true',
"Use tangential distortion coefficients (usually unnecessary).",
console_only=False, required=False, shorthand="ct")
use_thin_prism = Argument(False, '?', 'bool_flag', 'store_true',
"Use thin prism coefficients / model",
console_only=False, required=False, shorthand="cp")
fix_thin_prism = Argument(False, '?', 'bool_flag', 'store_true',
"Fix the thin prism coefficients",
console_only=False, required=False, shorthand="cfp")
fix_radial = Argument(False, '?', 'bool_flag', 'store_true',
"Fix radial distortion coefficients",
console_only=False, required=False, shorthand="cfr")
# TODO: test fisheye
use_fisheye_model = Argument(False, '?', 'bool_flag', 'store_true',
"Use the fisheye distortion model.",
console_only=False, required=False, shorthand="cf")
test = Argument(False, '?', 'bool_flag', 'store_true',
"Will fix all calibration parameters and run only one iteration, " +
"in order to simply print out the reprojection error. Does not save results.")
# ============== TIME SYNCHRONIZATION CONTROLS ====================================================================#
unsynced = Argument(False, '?', 'bool_flag', 'store_true',
"Used to find extrinsics between multiple unsynchronized cameras."
"The multiple videos need to contain a long sequence of frames" +
"with the calibration board taken during the same session with all " +
"cameras in static positions relative to each-other. However, you must supply reliable " +
"intrinsics for each camera (see input_calibration) and an appropriate max_frame_offset. ",
console_only=False, required=False)
max_frame_offset = Argument(100, '?', int, 'store',
"Used for unsynced calibration only: maximum delay, in frames, between videos.",
console_only=False, required=False)
seek_miss_count = Argument(5, '?', int, arg_help="Increase sensitivity and seek time of calibration intervals")
use_all_frames = Argument(False, '?', 'bool_flag', 'store_true', 'Use all frames (skips calibration seeking)')
# ============== VERBOSITY CONTROLS =============================================================================#
skip_printing_output = Argument(False, '?', 'bool_flag', 'store_true',
"Skip printing output.",
console_only=False, required=False)
@staticmethod
def generate_missing_shorthands():
for item in Setting:
if item.value.shorthand is None:
item.value.shorthand = "-" + "".join([item[1] for item in re.findall(r"(:?^|_)(\w)", item.name)])
@staticmethod
def generate_defaults_dict():
"""
@rtype: dict
@return: dictionary of Setting defaults
"""
dict = {}
for item in Setting:
dict[item.name] = item.value.default
return dict
@staticmethod
def generate_parser(defaults, console_only=False, description="Description N/A", parents=None):
"""
@rtype: argparse.ArgumentParser
@return: either a console-only or a config_file+console parser using the specified defaults and, optionally,
parents.
@type defaults: dict
@param defaults: dictionary of default settings and their values.
For a conf-file+console parser, these come from the config file. For a console-only parser, these are generated.
@type console_only: bool
@param console_only: accept only command-line/terminal arguments, not a configuration file
@type description: str
@param description: description of the program that uses the parser, to be used in the help file
@type parents: list[argparse.ArgumentParser] | None
"""
if console_only:
parser = ap.ArgumentParser(description=description, formatter_class=ap.RawDescriptionHelpFormatter,
add_help=False)
else:
if parents is None:
raise ValueError("A conf-file+console parser requires at least a console-only parser as a parent.")
parser = ap.ArgumentParser(parents=parents)
for item in Setting:
if (item.value.console_only and console_only) or (not item.value.console_only and not console_only):
if item.value.type == 'bool_flag':
parser.add_argument(item.value.shorthand, '--' + item.name, action=item.value.action,
default=defaults[item.name], required=item.value.required,
help=item.value.help)
else:
parser.add_argument(item.value.shorthand, '--' + item.name, action=item.value.action,
type=item.value.type, nargs=item.value.nargs, required=item.value.required,
default=defaults[item.name], help=item.value.help)
if not console_only:
parser.set_defaults(**defaults)
return parser
def load_app_from_config(path):
"""
Generate app directly from config file, bypassing command line settings (useful for testing in ipython)
"""
Setting.generate_missing_shorthands()
defaults = Setting.generate_defaults_dict()
if osp.isfile(path):
file_stream = open(path, "r", encoding="utf-8")
config_defaults = load(file_stream, Loader=Loader)
file_stream.close()
for key, value in config_defaults.items():
defaults[key] = value
else:
raise ValueError("Settings file not found at: {0:s}".format(path))
args = ap.Namespace()
for key, value in defaults.items():
args.__dict__[key] = value
if args.unsynced:
app = ApplicationUnsynced(args)
else:
app = ApplicationSynced(args)
return app
def main():
Setting.generate_missing_shorthands()
defaults = Setting.generate_defaults_dict()
conf_parser = \
Setting.generate_parser(defaults, console_only=True, description=
"Use one or more .mp4 video files to perform calibration: " +
"find the cameras' intrinsics and/or extrinsics.")
# ============== STORAGE/RETRIEVAL OF CONSOLE SETTINGS ===========================================#
args, remaining_argv = conf_parser.parse_known_args()
defaults[Setting.save_settings.name] = args.save_settings
if args.settings_file:
defaults[Setting.settings_file.name] = args.settings_file
if osp.isfile(args.settings_file):
file_stream = open(args.settings_file, "r", encoding="utf-8")
config_defaults = load(file_stream, Loader=Loader)
file_stream.close()
for key, value in config_defaults.items():
defaults[key] = value
else:
raise ValueError("Settings file not found at: {0:s}".format(args.settings_file))
parser = Setting.generate_parser(defaults, parents=[conf_parser])
args = parser.parse_args(remaining_argv)
# process "special" setting values
if args.folder == "!settings_file_location":
if args.settings_file and osp.isfile(args.settings_file):
args.folder = osp.dirname(args.settings_file)
# save settings if prompted to do so
if args.save_settings and args.settings_file:
setting_dict = vars(args)
file_stream = open(args.settings_file, "w", encoding="utf-8")
file_name = setting_dict[Setting.save_settings.name]
del setting_dict[Setting.save_settings.name]
del setting_dict[Setting.settings_file.name]
dump(setting_dict, file_stream, Dumper=Dumper)
file_stream.close()
setting_dict[Setting.save_settings.name] = file_name
setting_dict[Setting.settings_file.name] = True
if args.unsynced:
app = ApplicationUnsynced(args)
app.gather_frame_data()
app.calibrate_time_reprojection(save_data=True)
else:
app = ApplicationSynced(args)
app.gather_frame_data()
app.run_calibration()
return 0
if __name__ == "__main__":
sys.exit(main())
| 57.13577
| 120
| 0.567153
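A short sketch of driving the calibration programmatically through load_app_from_config instead of the command line. The settings path is a placeholder; the keys inside the YAML file are the Setting names defined above.
# Hypothetical settings file path.
app = load_app_from_config("./work/calib_settings.yaml")

# Assuming 'unsynced' is left at its default (False), this returns an
# ApplicationSynced instance, driven the same way main() drives it:
app.gather_frame_data()
app.run_calibration()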
|
2a0163b9a58fca512d611a5b31d559f8465f9b0c
| 9,514
|
py
|
Python
|
src/tools/data_features.py
|
stembl/vibproc
|
2588ad7fad5309a0a56fe5ea3d0a0f4affd10911
|
[
"MIT"
] | null | null | null |
src/tools/data_features.py
|
stembl/vibproc
|
2588ad7fad5309a0a56fe5ea3d0a0f4affd10911
|
[
"MIT"
] | null | null | null |
src/tools/data_features.py
|
stembl/vibproc
|
2588ad7fad5309a0a56fe5ea3d0a0f4affd10911
|
[
"MIT"
] | null | null | null |
## Dataset Features
# Calculate features of the dataset that will allow it to be compared with other datasets.
# Peaks ~ the maximum G value in each event across all rows in each column.
# Mean ~ the average G value in each event across all rows in each column.
# 3Sig_max/min ~ The max and min value 3 sigma for each column across rows and events.
import time
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from scipy import log10
from scipy.signal import welch
from math import floor
from tqdm import trange
def vib_peaks (data, th=1):
## Find impacts over a threshhold.
'''
Assume data is of the form [Time, R1, R2, ... , Rn], where n is the number of response
columns in the file.
th ~ Threshold [G] over which peaks are recorded.
The threshold value is only required when recording over time. When recording impacts
'''
peaks = abs(data).max(axis=1).T
mean = data.mean(axis=1).T
return peaks, mean
def sigma_calc(data, degree):
'''
Calculates sigma of a set of values to a requested degree, e.g. 6 sigma.
Input values can have as many columns as wanted. The calculation will
be performed across the rows.
If a pandas Panel or 3D array is input, calculation will be performed
across the rows and depth.
Returns:
[sigma1, sigma2, ..., sigman] for n columns of values input.
'''
mean = sp.mean(data, axis=0)
std = sp.std(data, axis=0, ddof=1)
for i in range(len(sp.shape(data))-2):
mean = sp.mean(mean, axis=0)
std = sp.std(std, axis=0, ddof=1)
sig = [mean + degree * std, mean - degree * std]
return(sig)
def vib_profiles(profile):
'''
Given a correct vibration profile, returns the profile with two
columns. [freq, psd]
If profile is a .csv file, returns a previously saved file.
'''
vibls = pd.read_csv('../../data/vib_profiles.csv')
input_profile = 0
for i in range(sp.shape(vibls)[1]):
if vibls.columns[i].upper() == profile.upper():
input_profile = vibls.iloc[:,i:i+2].dropna()
#input_profile = input_profile.dropna()
input_profile.columns = ['Freq', 'PSD']
return(input_profile)
if profile[-4:].upper() == '.csv'.upper():
input_profile = pd.read_csv(profile)
return(input_profile)
if type(input_profile) == int:
print('Input vibration profile not found')
input_profile = False
return(input_profile)
def fft_data(data):
dt = float(data.E0.Time[1] - data.E0.Time[0]) # Time Steps, [s]
fs = float(1./dt) # Sampling Frequency
N = int(len(data.E0.Time)) # Number of Samples
sig_len = N/fs # Signal Length [s]
df = 1/sig_len
## FFT, basic
freq = df*sp.arange(0, N, dtype='d')
data_fft = sp.fft(data.iloc[:,:,1:])
f_fft = freq*sp.ones((len(data_fft), len(freq)))
data_fft = sp.concatenate((f_fft[...,None], data_fft), axis=2)
#fft_avg = sp.mean(data_fft, axis=0)
## PSD, basic
f, psd = welch(data.iloc[:,:,1:], fs = fs, nperseg = N, axis=1)
data_psd = sp.zeros((psd.shape[0], psd.shape[1], psd.shape[2]+1))
f_psd = f*sp.ones((len(psd), len(f)))
data_psd = sp.concatenate((f_psd[...,None], psd), axis=2)
return(data_fft, data_psd)
def fft_dataS(data):
tic = time.perf_counter()
# Maximum Time to consider data, will remove any data past
# the last whole second
maxT = int(floor(max(data.Time)))
# Initialize the minimum sample length of all events
samp_min = maxT
# Time Step
dt = float(data.Time[2] - data.Time[1])
fs = float(1./dt) # Sampling Frequency
N = int(len(data.Time)) # Number of Samples
sig_len = N/fs # Signal Length [s]
df = 1/sig_len
## FFT, basic
freq = df*sp.arange(0, N, dtype='d')
data_fft = sp.fft(data.iloc[:,1:])
f_fft = freq*sp.ones((len(data_fft), len(freq)))
data_fft = sp.concatenate((f_fft[...,None], data_fft), axis=2)
#fft_avg = sp.mean(data_fft, axis=0)
## PSD, basic
f, psd = welch(data.iloc[:,1:], fs = fs, nperseg = N, axis=1)
data_psd = sp.zeros((psd.shape[0], psd.shape[1], psd.shape[2]+1))
f_psd = f*sp.ones((len(psd), len(f)))
data_psd = sp.concatenate((f_psd[...,None], psd), axis=2)
return(data_fft, data_psd)
def psd_avg_data(data):
data_fft, data_psd = fft_data(data)
avg_psd = sp.array([[sp.mean(data_psd[:,i,j]) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))])
max_psd = sp.array([[max(abs(data_psd[:,i,j])) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))])
min_psd = sp.array([[min(abs(data_psd[:,i,j])) for j in range(len(data_psd[0,0,:]))] for i in range(len(data_psd[0,:,0]))])
head = list(data.iloc[0].columns)
head[0] = 'Freq'
avg_psd = pd.DataFrame(avg_psd, columns=head)
max_psd = pd.DataFrame(max_psd, columns=head)
min_psd = pd.DataFrame(min_psd, columns=head)
return(avg_psd, max_psd, min_psd)
def grms (freq, PSD):
"""Returns the Grms value for a shaped random vibration input curve.
Input the frequency and PSD values as a list in the form grms(freq, PSD).
The frequency and PSD list must have the same number of elements."""
from math import log10, log
A = 0
if len(freq)!=len(PSD):
print("Error: The number of elements in the Frequency and PSD lists do not match.")
else:
for i in range(1,len(freq)):
# Calculate the slope
dB = 10 * log10(PSD[i]/PSD[i-1]) # dB
OCT = log10(freq[i]/freq[i-1])/log10(2) # Octave
S = dB/OCT # Slope
# Calculate the area in units of [G^2]
if S == 0:
A = A + PSD[i] * (freq[i] - freq[i-1])
elif S == -3:
A = A + -freq[i] * PSD[i] * log(freq[i-1] / freq[i])
else:
A = A + (3 * PSD[i]/(3 + S)) * (freq[i] - (freq[i-1]/freq[i])**(S/3) * freq[i-1])
# Calculate the Grms [G]
grms = A**(0.5)
return(grms)
## Interpolate values of one profile across frequency range of another response.
def vib_trans(resp, profile):
"""
Interpolate the values of the profile across the frequency range of the response. The profile consists
of two lists, a frequency and amplitude. The response consists of the same. This program finds the amplitudes
of the profile at the frequencies of the response. This allows you to compare the amplitudes of the response
and the profile at the same frequencies.
resp = [frequency, amplitude]
profile = [frequency, amplitude]
Returns the transmissibility results Respose / Input Profile.
return([frequency, transmissibility amplitude])
"""
# The number of axis or recordings over which to
num_resp = min([resp.shape[1], profile.shape[1]])-1
transo = []
cols = ['Freq']
for k in range(num_resp):
m0 = [] # Finding the slope of the input profile
for i in range(profile.shape[0]-1):
m0.append((log10(profile.iloc[i+1,k+1])-log10(profile.iloc[i,k+1]))/(log10(profile.Freq[i+1])-log10(profile.Freq[i])))
freq = [] # Initialize the frequency variable
resp_c = [] # Initialize the clipped response variable
m = [] # Initialize the slope variable
x1 = [] # Initialize the frequency used in the point slope equation
y1 = [] # Initialize the amplitude used in the point slope equation
# Find the frequencies and response where which lie within the profile frequency range
for i in trange(len(resp.Freq)):
if resp.Freq[i] >= float(min(profile.Freq)) and resp.Freq[i] < float(max(profile.Freq)):
freq.append(resp.Freq[i])
resp_c.append(resp.iloc[i, k+1])
for j in range(profile.shape[0]-1):
if resp.Freq[i] < profile.Freq[j+1] and resp.Freq[i] >= profile.Freq[j]:
m.append(m0[j])
x1.append(profile.iloc[j+1,0])
y1.append(profile.iloc[j+1,k+1])
# Make sure the slope is recording across the appropriate values.
if len(m)!= len(freq):
print('Error finding slope, len(m) != len(freq)')
print('len m = %i' %len(m))
print('len freq = %i' %len(freq))
resp_int = [] # Initializing the interpolated response variable.
# Calculating the interpolated response given the slope and input profile point
for i in range(len(freq)):
resp_int.append(10**(m[i]*(log10(freq[i])-log10(x1[i])) + log10(y1[i])))
# Converting the list to an array
resp_int = sp.array(resp_int)
resp_c = sp.array(resp_c)
## From Steinberg 1988
# P_out = Q^2 * P
# Solving for Q ->
trans = (resp_c/resp_int)**0.5 # Q ~ Transmissibility of system
if len(transo) == 0:
transo = sp.array((trans), ndmin=2).T
else:
transo = sp.concatenate((transo, sp.array((trans), ndmin=2).T), axis=1)
cols.append('R%i' %k)
return(pd.DataFrame((sp.concatenate((sp.array((freq), ndmin=2).T, transo), axis=1)), columns=cols))
| 36.174905
| 130
| 0.5926
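A quick numerical check of grms above: for a flat profile the slope is 0 dB/octave, so the integrated area reduces to PSD * bandwidth and Grms = sqrt(PSD * (f2 - f1)). The profile values below are illustrative.
# Flat 0.04 G^2/Hz profile from 20 Hz to 2000 Hz.
freq = [20.0, 2000.0]
psd = [0.04, 0.04]

# Area = 0.04 * (2000 - 20) = 79.2 G^2, so Grms = sqrt(79.2) ≈ 8.9 G.
print(grms(freq, psd))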
|
4464fa9d17f624ff80d60ee25a6650de5efa6e18
| 1,006
|
py
|
Python
|
xbots/ultrabot_config.py
|
kohloderso/quickbot_bbb
|
430510b1b3bc54f72fd738b8235abecdfa186ff2
|
[
"BSD-3-Clause"
] | null | null | null |
xbots/ultrabot_config.py
|
kohloderso/quickbot_bbb
|
430510b1b3bc54f72fd738b8235abecdfa186ff2
|
[
"BSD-3-Clause"
] | null | null | null |
xbots/ultrabot_config.py
|
kohloderso/quickbot_bbb
|
430510b1b3bc54f72fd738b8235abecdfa186ff2
|
[
"BSD-3-Clause"
] | null | null | null |
# ultrabot
#
# _'_'_ UXfm
# ++++++++++++++
# _/ + + \_
# UXfl _/ + + \_ UXfr
# / + LMP RMP + \
# + __| |__ +
# + | | +
# + | | +
# _|++++ | _ _ | ++++|_
# UXbl _|++++_| T T T T |_++++|_ UXbr
# |++++ | | | | ++++|
# ++++ Ol Or ++++
# + +
# ++++++++++++++++++
#
# ultrasonic
UTbl = "P8_12"
UEbl = "P8_11"
UTfl = "P9_21"
UEfl = "P9_22"
UTfm = "P9_23"
UEfm = "P9_24"
UTfr = "P9_25"
UEfr = "P9_26"
UTbr = "P9_27"
UEbr = "P9_30"
ULTRAS = ((UTbl, UEbl), (UTfl, UEfl), (UTfm, UEfm), (UTfr, UEfr), (UTbr, UEbr))
# encoder aka odometry
Ol = "P9_41"
Or = "P9_42"
# motors
INl1 = "P9_11"
INl2 = "P9_12"
PWMl = "P9_14"
INr1 = "P9_13"
INr2 = "P9_15"
PWMr = "P9_16"
RMP = (INr1, INr2, PWMr)
LMP = (INl1, INl2, PWMl)
# led
LED = "USR1"
| 18.62963
| 79
| 0.336978
|
2f057265fe0bda73bab003446636e8db827b4af8
| 2,197
|
py
|
Python
|
docs/conf.py
|
e-Lisae/cheesyutils
|
75e3d40d4456f2947e6b5130328d2cacd42b9e31
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
e-Lisae/cheesyutils
|
75e3d40d4456f2947e6b5130328d2cacd42b9e31
|
[
"MIT"
] | 5
|
2021-05-07T02:15:39.000Z
|
2022-03-04T02:25:13.000Z
|
docs/conf.py
|
e-Lisae/cheesyutils
|
75e3d40d4456f2947e6b5130328d2cacd42b9e31
|
[
"MIT"
] | 2
|
2021-04-10T23:39:34.000Z
|
2021-07-18T19:35:19.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'CheesyUtils'
copyright = '2021, CheesyGamer77'
author = 'CheesyGamer77'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
import re
version = ''
with open('../src/cheesyutils/__init__.py') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# The full version, including alpha/beta/rc tags.
release = version
branch = 'master'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'furo'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
numpydoc_show_class_members = False
| 33.287879
| 99
| 0.660446
|
e3a1bc9f60755541165b9f40e6986931f41b199d
| 6,392
|
py
|
Python
|
sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/pycode/nodes.py
|
YYTVicky/kafka
|
b0f3eb276fa034b215570cd4f837851d9fb9166a
|
[
"Apache-2.0"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/pycode/nodes.py
|
axbaretto/presto
|
f137d2709db42b5c3e4d43a631832a8f74853065
|
[
"Apache-2.0"
] | 28
|
2020-03-04T22:01:48.000Z
|
2022-03-12T00:59:47.000Z
|
sdks/python/.tox/docs/lib/python2.7/site-packages/sphinx/pycode/nodes.py
|
axbaretto/presto
|
f137d2709db42b5c3e4d43a631832a8f74853065
|
[
"Apache-2.0"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
# -*- coding: utf-8 -*-
"""
sphinx.pycode.nodes
~~~~~~~~~~~~~~~~~~~
Parse tree node implementations.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
parent = None
def _eq(self, other):
raise NotImplementedError
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
def __ne__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
__hash__ = None
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i - 1]
def get_next_sibling(self):
"""Return next child in parent's children, or None."""
if self.parent is None:
return None
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i + 1]
except IndexError:
return None
def get_prev_leaf(self):
"""Return the leaf node that precedes this node in the parse tree."""
def last_child(node):
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_child(node.children[-1])
if self.parent is None:
return None
prev = self.get_prev_sibling()
if isinstance(prev, Leaf):
return prev
elif prev is not None:
return last_child(prev)
return self.parent.get_prev_leaf()
def get_next_leaf(self):
"""Return self if leaf, otherwise the leaf node that succeeds this
node in the parse tree.
"""
node = self
while not isinstance(node, Leaf):
assert node.children
node = node.children[0]
return node
def get_lineno(self):
"""Return the line number which generated the invocant node."""
return self.get_next_leaf().lineno
def get_prefix(self):
"""Return the prefix of the next leaf node."""
# only leaves carry a prefix
return self.get_next_leaf().prefix
class Node(BaseNode):
"""
Node implementation for nonterminals.
"""
def __init__(self, type, children, context=None):
# type of nonterminals is >= 256
# assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
# assert ch.parent is None, repr(ch)
ch.parent = self
def __repr__(self):
return '%s(%s, %r)' % (self.__class__.__name__,
self.type, self.children)
def __str__(self):
"""This reproduces the input source exactly."""
return ''.join(map(str, self.children))
def _eq(self, other):
return (self.type, self.children) == (other.type, other.children)
# support indexing the node directly instead of .children
def __getitem__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def __len__(self):
return len(self.children)
class Leaf(BaseNode):
"""
Node implementation for leaf nodes (terminals).
"""
prefix = '' # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
column = 0  # Column where this token starts in the input
def __init__(self, type, value, context=None):
# type of terminals is below 256
# assert 0 <= type < 256, type
self.type = type
self.value = value
if context is not None:
self.prefix, (self.lineno, self.column) = context
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.type, self.value, self.prefix)
def __str__(self):
"""This reproduces the input source exactly."""
return self.prefix + str(self.value)
def _eq(self, other):
"""Compares two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def convert(grammar, raw_node):
"""Convert raw node to a Node or Leaf instance."""
type, value, context, children = raw_node
if children or type in grammar.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
def nice_repr(node, number2name, prefix=False):
def _repr(node):
if isinstance(node, Leaf):
return "%s(%r)" % (number2name[node.type], node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_repr, node.children)))
def _prepr(node):
if isinstance(node, Leaf):
return "%s(%r, %r)" % (number2name[node.type],
node.prefix, node.value)
else:
return "%s(%s)" % (number2name[node.type],
', '.join(map(_prepr, node.children)))
return (prefix and _prepr or _repr)(node)
class NodeVisitor(object):
def __init__(self, number2name, *args):
self.number2name = number2name
self.init(*args)
def init(self, *args):
pass
def visit(self, node):
"""Visit a node."""
method = 'visit_' + self.number2name[node.type]
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
for child in node:
self.visit(child)
| 30.583732
| 77
| 0.573529
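A small sketch of building a tiny tree with the Node and Leaf classes above and walking it with NodeVisitor. The numeric type ids and the number2name mapping are invented for illustration; in Sphinx they come from the pgen grammar tables.
# Illustrative type ids: terminals are < 256, nonterminals >= 256.
NAME, EXPR = 1, 256
number2name = {NAME: 'NAME', EXPR: 'expr'}

left = Leaf(NAME, 'a', context=('', (1, 0)))
right = Leaf(NAME, 'b', context=(' ', (1, 2)))
tree = Node(EXPR, [left, right])

print(nice_repr(tree, number2name))   # expr(NAME('a'), NAME('b'))
print(str(tree))                      # 'a b' -- prefixes plus token values

class NameCollector(NodeVisitor):
    def init(self):
        self.names = []
    def visit_NAME(self, node):
        self.names.append(node.value)

collector = NameCollector(number2name)
collector.visit(tree)
print(collector.names)                # ['a', 'b']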
|
744d81c50901148cbf8f8867f4371c17357fa08d
| 7,089
|
py
|
Python
|
examples/forces_and_energies/training_pytorch.py
|
Iximiel/dscribe
|
1dd845cb918a244714f835023bdc82d95719eef1
|
[
"Apache-2.0"
] | null | null | null |
examples/forces_and_energies/training_pytorch.py
|
Iximiel/dscribe
|
1dd845cb918a244714f835023bdc82d95719eef1
|
[
"Apache-2.0"
] | null | null | null |
examples/forces_and_energies/training_pytorch.py
|
Iximiel/dscribe
|
1dd845cb918a244714f835023bdc82d95719eef1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
torch.manual_seed(7)
# Load the dataset
D_numpy = np.load("D.npy")[:, 0, :] # We only have one SOAP center
n_samples, n_features = D_numpy.shape
E_numpy = np.array([np.load("E.npy")]).T
F_numpy = np.load("F.npy")
dD_dr_numpy = np.load("dD_dr.npy")[:, 0, :, :, :] # We only have one SOAP center
r_numpy = np.load("r.npy")
# Select equally spaced points for training
n_train = 30
idx = np.linspace(0, len(r_numpy) - 1, n_train).astype(int)
D_train_full = D_numpy[idx]
E_train_full = E_numpy[idx]
F_train_full = F_numpy[idx]
r_train_full = r_numpy[idx]
dD_dr_train_full = dD_dr_numpy[idx]
# Standardize input for improved learning. Fit is done only on training data,
# scaling is applied to both descriptors and their derivatives on training and
# test sets.
scaler = StandardScaler().fit(D_train_full)
D_train_full = scaler.transform(D_train_full)
D_whole = scaler.transform(D_numpy)
dD_dr_whole = dD_dr_numpy / scaler.scale_[None, None, None, :]
dD_dr_train_full = dD_dr_train_full / scaler.scale_[None, None, None, :]
# Calculate the variance of energy and force values for the training set. These
# are used to balance their contribution to the MSE loss
var_energy_train = E_train_full.var()
var_force_train = F_train_full.var()
# Subselect 20% of validation points for early stopping.
D_train, D_valid, E_train, E_valid, F_train, F_valid, dD_dr_train, dD_dr_valid = train_test_split(
D_train_full,
E_train_full,
F_train_full,
dD_dr_train_full,
test_size=0.2,
random_state=7,
)
# Create tensors for pytorch
D_whole = torch.Tensor(D_whole)
D_train = torch.Tensor(D_train)
D_valid = torch.Tensor(D_valid)
E_train = torch.Tensor(E_train)
E_valid = torch.Tensor(E_valid)
F_train = torch.Tensor(F_train)
F_valid = torch.Tensor(F_valid)
dD_dr_train = torch.Tensor(dD_dr_train)
dD_dr_valid = torch.Tensor(dD_dr_valid)
class FFNet(torch.nn.Module):
"""A simple feed-forward network with one hidden layer, randomly
initialized weights, sigmoid activation and a linear output layer.
"""
def __init__(self, n_features, n_hidden, n_out):
super(FFNet, self).__init__()
self.linear1 = torch.nn.Linear(n_features, n_hidden)
torch.nn.init.normal_(self.linear1.weight, mean=0, std=1.0)
self.sigmoid = torch.nn.Sigmoid()
self.linear2 = torch.nn.Linear(n_hidden, n_out)
torch.nn.init.normal_(self.linear2.weight, mean=0, std=1.0)
def forward(self, x):
x = self.linear1(x)
x = self.sigmoid(x)
x = self.linear2(x)
return x
def energy_force_loss(E_pred, E_train, F_pred, F_train):
"""Custom loss function that targets both energies and forces.
"""
energy_loss = torch.mean((E_pred - E_train)**2) / var_energy_train
force_loss = torch.mean((F_pred - F_train)**2) / var_force_train
return energy_loss + force_loss
# Initialize model
model = FFNet(n_features, n_hidden=5, n_out=1)
# The Adam optimizer is used for training the model parameters
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
# Train!
n_max_epochs = 5000
batch_size = 2
patience = 20
i_worse = 0
old_valid_loss = float("Inf")
best_valid_loss = float("Inf")
# We explicitly require that the gradients should be calculated for the input
# variables. PyTorch will not do this by default as it is typically not needed.
D_valid.requires_grad = True
# Epochs
for i_epoch in range(n_max_epochs):
# Batches
permutation = torch.randperm(D_train.size()[0])
for i in range(0, D_train.size()[0], batch_size):
indices = permutation[i:i + batch_size]
D_train_batch, E_train_batch = D_train[indices], E_train[indices]
D_train_batch.requires_grad = True
F_train_batch, dD_dr_train_batch = F_train[indices], dD_dr_train[indices]
# Forward pass: Predict energies from the descriptor input
E_train_pred_batch = model(D_train_batch)
# Get derivatives of model output with respect to input variables. The
# torch.autograd.grad-function can be used for this, as it returns the
# gradients of the input with respect to outputs. It is very important
# to set the create_graph=True in this case. Without it the derivatives
# of the NN parameters with respect to the loss from the force error
# will not be populated (=the force error will not affect the
# training), but the model will still run fine without errors.
df_dD_train_batch = torch.autograd.grad(
outputs=E_train_pred_batch,
inputs=D_train_batch,
grad_outputs=torch.ones_like(E_train_pred_batch),
create_graph=True,
)[0]
# Get derivatives of input variables (=descriptor) with respect to atom
# positions = forces
F_train_pred_batch = -torch.einsum('ijkl,il->ijk', dD_dr_train_batch, df_dD_train_batch)
# Zero gradients, perform a backward pass, and update the weights.
# D_train_batch.grad.data.zero_()
optimizer.zero_grad()
loss = energy_force_loss(E_train_pred_batch, E_train_batch, F_train_pred_batch, F_train_batch)
loss.backward()
optimizer.step()
# Check early stopping criterion and save best model
E_valid_pred = model(D_valid)
df_dD_valid = torch.autograd.grad(
outputs=E_valid_pred,
inputs=D_valid,
grad_outputs=torch.ones_like(E_valid_pred),
)[0]
F_valid_pred = -torch.einsum('ijkl,il->ijk', dD_dr_valid, df_dD_valid)
valid_loss = energy_force_loss(E_valid_pred, E_valid, F_valid_pred, F_valid)
if valid_loss < best_valid_loss:
# print("Saving at epoch {}".format(i_epoch))
torch.save(model.state_dict(), "best_model.pt")
best_valid_loss = valid_loss
if valid_loss >= old_valid_loss:
i_worse += 1
else:
i_worse = 0
if i_worse > patience:
print("Early stopping at epoch {}".format(i_epoch))
break
old_valid_loss = valid_loss
if i_epoch % 500 == 0:
print(" Finished epoch: {} with loss: {}".format(i_epoch, loss.item()))
# Way to tell pytorch that we are entering the evaluation phase
model.load_state_dict(torch.load("best_model.pt"))
model.eval()
# Calculate energies and force for the entire range
E_whole = torch.Tensor(E_numpy)
F_whole = torch.Tensor(F_numpy)
dD_dr_whole = torch.Tensor(dD_dr_whole)
D_whole.requires_grad = True
E_whole_pred = model(D_whole)
df_dD_whole = torch.autograd.grad(
outputs=E_whole_pred,
inputs=D_whole,
grad_outputs=torch.ones_like(E_whole_pred),
)[0]
F_whole_pred = -torch.einsum('ijkl,il->ijk', dD_dr_whole, df_dD_whole)
E_whole_pred = E_whole_pred.detach().numpy()
E_whole = E_whole.detach().numpy()
# Save results for later analysis
np.save("r_train_full.npy", r_train_full)
np.save("E_train_full.npy", E_train_full)
np.save("F_train_full.npy", F_train_full)
np.save("E_whole_pred.npy", E_whole_pred)
np.save("F_whole_pred.npy", F_whole_pred)
| 36.168367
| 102
| 0.715051
|
7877a85fa9eb35b5950d83da9d57e20b44c87f78
| 15,866
|
py
|
Python
|
src/tests/test_cli.py
|
mplattner/evengsdk
|
b178eae594a6436b0866659d01e65ac68ffadf31
|
[
"MIT"
] | null | null | null |
src/tests/test_cli.py
|
mplattner/evengsdk
|
b178eae594a6436b0866659d01e65ac68ffadf31
|
[
"MIT"
] | null | null | null |
src/tests/test_cli.py
|
mplattner/evengsdk
|
b178eae594a6436b0866659d01e65ac68ffadf31
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import pytest
from click.testing import CliRunner, Result
from evengsdk.cli.cli import main as cli
from evengsdk.cli.version import __version__
from evengsdk.exceptions import EvengHTTPError
LAB_TO_EDIT = {"name": "lab_to_edit", "path": "/"}
LAB_TO_CREATE = {"name": "test lab1", "path": "/test lab1.unl"}
NODE_TO_CREATE = {
"node_type": "qemu",
"template": "csr1000v",
"image": "csr1000v-universalk9-16.06.06",
"name": "CSR1",
"ethernet": 4,
"cpu": 2,
"serial": 2,
"delay": 0,
}
TEST_CONFIG = """
!
hostname vEOS4
!
"""
@pytest.fixture()
def lab_to_edit():
return LAB_TO_EDIT.copy()
@pytest.fixture()
def cli_client(lab_to_edit, client, request):
client.login(
username=os.environ["EVE_NG_USERNAME"], password=os.environ["EVE_NG_PASSWORD"]
)
return client
@pytest.fixture()
def setup_test_lab(lab_to_edit, cli_client):
cli_client.api.create_lab(**lab_to_edit)
yield
cli_client.login(
username=os.environ["EVE_NG_USERNAME"], password=os.environ["EVE_NG_PASSWORD"]
)
cli_client.api.delete_lab(lab_to_edit["path"] + lab_to_edit["name"])
class TestCli:
def test_entrypoint(self):
"""
Is entrypoint script installed? (setup.py)
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["--help"])
assert result.exit_code == 0
def test_version_displays_library_version(self):
"""
Arrange/Act: Run the `version` subcommand.
Assert: The output matches the library version.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["version"])
assert (
__version__ in result.output.strip()
), "Version number should match library version."
class TestSystemCommands:
def test_system_status(self):
"""
Arrange/Act: Run the `system` command with the 'status' subcommand.
Assert: The output indicates that a status is successfully returned.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["show-status"])
assert result.exit_code == 0, result.output
assert "System" in result.output
def test_system_list_network_types_text_output(self):
"""
Arrange/Act: Run the `system` command with the 'list-network-types'
subcommand.
Assert: The output indicates that network types are successfully
returned.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["list-network-types"])
assert result.exit_code == 0, result.output
def test_system_list_node_templates_text_output(self):
"""
Arrange/Act: Run the `system` command with the 'list-node-templates'
subcommand.
Assert: The output indicates that node templates are successfully
returned.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["list-node-templates"])
assert result.exit_code == 0, result.output
def test_system_list_user_roles_text_output(self):
"""
Arrange/Act: Run the `system` command with the 'user-roles'
subcommand.
        Assert: The output indicates that user roles are successfully
returned.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["list-user-roles"])
assert result.exit_code == 0, result.output
def test_system_read_template(self):
"""
Arrange/Act: Run the `system` command with the 'read-template'
subcommand.
        Assert: The output indicates that the template is successfully
returned.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["show-template", "asa"])
assert result.exit_code == 0, result.output
class TestUserCommands:
def test_user_list(self):
"""
Arrange/Act: Run the `user` command with the 'list' subcommand.
Assert: The output indicates that users are listed successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["user", "list"])
assert result.exit_code == 0, result.output
def test_user_create(self):
"""
Arrange/Act: Run the `user` command with the 'create' subcommand.
Assert: The output indicates that user is created successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["user", "create"])
assert result.exit_code == 0, result.output
def test_user_edit(self):
"""
Arrange/Act: Run the `user` command with the 'edit' subcommand.
Assert: The output indicates that user is updated successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["user", "edit"])
assert result.exit_code == 0, result.output
def test_user_read(self):
"""
Arrange/Act: Run the `user` command with the 'read' subcommand.
Assert: The output indicates that user is retrieved successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["user", "read"])
assert result.exit_code == 0, result.output
def test_user_delete(self):
"""
Arrange/Act: Run the `user` command with the 'delete' subcommand.
Assert: The output indicates that user is retrieved successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["user", "delete"])
assert result.exit_code == 0, result.output
class TestLabFolderCommands:
def test_folder_list(self):
"""
Arrange/Act: Run the `folder` command with the 'list' subcommand.
Assert: The output indicates that folders are listed successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["folder", "list"])
assert result.exit_code == 0, result.output
@pytest.mark.xfail
def test_folder_create(self):
"""
Arrange/Act: Run the `folder` command with the 'create' subcommand.
        Assert: The output indicates that the folder is created successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["folder", "create"])
assert result.exit_code == 0, result.output
@pytest.mark.xfail
def test_folder_read(self):
"""
Arrange/Act: Run the `folder` command with the 'read' subcommand.
Assert: The output indicates that folders are retrieved successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["folder", "read", "/"])
assert result.exit_code == 0, result.output
@pytest.mark.xfail
def test_folder_edit(self):
"""
Arrange/Act: Run the `folder` command with the 'edit' subcommand.
Assert: The output indicates that folder is updated successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["folder", "edit"])
assert result.exit_code == 0, result.output
@pytest.mark.xfail
def test_folder_delete(self):
"""
Arrange/Act: Run the `folder` command with the 'delete' subcommand.
Assert: The output indicates that folder is deleted successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["folder", "delete"])
assert result.exit_code == 0, result.output
class TestLabCommands:
def test_lab_create(self):
"""
Arrange/Act: Run the `lab` command with the 'create' subcommand.
Assert: The output indicates that lab is created successfully.
"""
runner: CliRunner = CliRunner()
cli_args = [
"lab",
"create",
"--author",
"joe tester",
"--name",
f"{LAB_TO_CREATE['name']}",
"--path",
"/",
"--description",
"Test lab",
"--version",
"1",
]
result: Result = runner.invoke(cli, cli_args)
assert result.exit_code == 0 or "Lab already exists" in str(result.output)
def test_lab_create_without_name_raises(self):
"""
Arrange/Act: Run the `lab` command with the 'create' subcommand.
        Assert: The output indicates that lab creation fails without a name.
"""
runner: CliRunner = CliRunner()
cli_args = [
"lab",
"create",
"--author",
"joe tester",
"--path",
"/",
"--description",
"Test lab",
"--version",
"1",
]
result: Result = runner.invoke(cli, cli_args)
assert result.exit_code > 0
assert "invalid or missing mandatory parameters" in result.output
def test_lab_edit(self, setup_test_lab, lab_to_edit):
"""
Arrange/Act: Run the `lab` command with the 'edit' subcommand.
Assert: The output indicates that lab is updated successfully.
"""
edit_cli_args = [
"lab",
"edit",
"--version",
"2",
"--path",
f"{lab_to_edit['path']}{lab_to_edit['name']}.unl",
]
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, edit_cli_args)
assert result.exit_code == 0, result.output
# LAB_TO_EDIT['path'] = f"{LAB_TO_EDIT['path']}{edited_name}.unl"
def test_lab_read(self):
"""
Arrange/Act: Run the `lab` command with the 'read' subcommand.
Assert: The output indicates that lab is retrieved successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(
cli, ["lab", "read", "--path", LAB_TO_CREATE["path"]]
)
assert result.exit_code == 0, result.output
def test_lab_delete(self):
"""
Arrange/Act: Run the `lab` command with the 'delete' subcommand.
Assert: The output indicates that lab is deleted successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(
cli, ["lab", "delete", "--path", LAB_TO_CREATE["path"]]
)
assert result.exit_code == 0, result.output
def test_lab_start(self):
"""
Arrange/Act: Run the `lab` command with the 'start' subcommand.
Assert: The output indicates that lab is started successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["lab", "start"])
assert result.exit_code == 0, result.output
def test_lab_stop(self):
"""
Arrange/Act: Run the `lab` command with the 'stop' subcommand.
Assert: The output indicates that lab is stopped successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["lab", "stop"])
assert result.exit_code == 0, result.output
def test_lab_list(self):
"""
Arrange/Act: Run the `lab` command with the 'list' subcommand.
Assert: The output indicates that labs are listed successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["lab", "list"])
assert result.exit_code == 0, result.output
def test_list_lab_topology(self):
"""
Arrange/Act: Run the `lab` command with the 'topology' subcommand.
Assert: The output indicates that lab topology is retrieved
successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["lab", "topology"])
assert result.exit_code == 0, result.output
@pytest.mark.xfail
def test_lab_export(self):
"""
Arrange/Act: Run the `lab` command with the 'export' subcommand.
Assert: The output indicates that lab exported successfully.
"""
runner: CliRunner = CliRunner()
with runner.isolated_filesystem():
result: Result = runner.invoke(cli, ["lab", "export"])
assert result.exit_code == 0, result.output
assert "Success" in result.output
# def test_lab_import(self):
# """
# Arrange/Act: Run the `lab` command with the 'export' subcommand.
# Assert: The output indicates that lab imported successfully.
# """
# runner: CliRunner = CliRunner()
# cli_commands = ["lab", "import", "--src", "test.zip"]
# result: Result = runner.invoke(cli, cli_commands)
# assert result.exit_code == 0, result.output
class TestLabNodeCommands:
def test_lab_node_create(self):
"""
Arrange/Act: Run the `node` command with the 'create' subcommand.
        Assert: The output indicates that the node is created successfully.
"""
cli_commands = [
"node",
"create",
"--node-type",
"qemu",
"--name",
"TEST_CSR",
"--template",
"csr1000v",
"--ethernet",
"4",
]
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, cli_commands)
assert result.exit_code == 0, result.output
def test_lab_node_list(self):
"""
Arrange/Act: Run the `node` command with the 'list' subcommand.
        Assert: The output indicates that the nodes are listed successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["node", "list"])
assert result.exit_code == 0, result.output
def test_lab_node_read(self):
"""
Arrange/Act: Run the `node` command with the 'read' subcommand.
        Assert: The output indicates that the node is retrieved successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["node", "read", "--node-id", "1"])
assert result.exit_code == 0, result.output
def test_lab_node_start_command(self):
"""
Arrange/Act: Run the `node` command with the 'start' subcommand.
        Assert: The output indicates that the node is started successfully.
"""
cli_commands = ["node", "start", "--node-id", "1"]
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, cli_commands)
assert result.exit_code == 0, result.output
assert "started" in result.output
def test_lab_node_stop_command(self):
"""
Arrange/Act: Run the `node` command with the 'stop' subcommand.
        Assert: The output indicates that the node is stopped successfully.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli, ["node", "stop", "--node-id", "1"])
assert result.exit_code == 0, result.output
assert "stopped" in result.output
def test_lab_node_upload_config_command(self):
"""
Arrange/Act: Run the `node` command with the 'upload-config'
subcommand.
        Assert: The output indicates that the config is uploaded successfully.
"""
runner: CliRunner = CliRunner()
with runner.isolated_filesystem():
with open("config.txt", "w") as f:
f.write(TEST_CONFIG)
cli_commands = ["node", "upload-config", "-n", "1", "--src", "config.txt"]
result: Result = runner.invoke(cli, cli_commands)
assert result.exit_code == 0, result.output
assert "Lab has been saved" in result.output
| 35.257778
| 86
| 0.597378
|
46fc52701acc365808b0706545b6ecf88acea217
| 2,707
|
py
|
Python
|
scripts/pub_demand.py
|
Fingerling42/frontiers-vessel-code
|
5a8fe921d63ae3961a734ca844d074572dc89746
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/pub_demand.py
|
Fingerling42/frontiers-vessel-code
|
5a8fe921d63ae3961a734ca844d074572dc89746
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/pub_demand.py
|
Fingerling42/frontiers-vessel-code
|
5a8fe921d63ae3961a734ca844d074572dc89746
|
[
"BSD-3-Clause"
] | 1
|
2020-04-12T15:10:42.000Z
|
2020-04-12T15:10:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
pub_demand.py: service file to automate demand publishing
A provider in Robonomics is looking for corresponding demand and offer messages.
To launch the whole scenario we must publish a demand message.
From the agent's side an offer message will be published
'''
# Standard, System and Third Party
# ROS
import rospy
# Robonomics communication
from robonomics_msgs.msg import Offer, Demand
from ethereum_common.msg import Address, UInt256
from ethereum_common.srv import Accounts, BlockNumber
from ipfs_common.msg import Multihash
MODEL = None or 'QmYb81uDNDHCnu9EZtYV4eoBDKRBAwJeNy1LT3p5Zbc357'
OBJECTIVE = None or 'Qmea8XkcSXmvLDKES7D886pfimsWh9Vjh1ZJsoHm9MWG4C'
TOKEN = None or '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2' # WETH
PRICE = None or 100000000000000000 # 0.1 WETH
LIFETIME = None or 100
if __name__ == '__main__':
rospy.init_node('demand_publisher')
rospy.loginfo('Launching...')
rospy.wait_for_service('/eth/current_block')
rospy.wait_for_service('/eth/accounts')
accounts = rospy.ServiceProxy('/eth/accounts', Accounts)()
rospy.loginfo(str(accounts)) # AIRA ethereum addresses
signing_demand = rospy.Publisher('/liability/infochan/eth/signing/demand', Demand, queue_size=128)
signing_offer = rospy.Publisher('/liability/infochan/eth/signing/offer', Offer, queue_size=128)
rospy.loginfo('Node launched')
model = MODEL or input('Model IPFS hash: ')
objective = OBJECTIVE or input('Objective IPFS hash: ')
token = TOKEN or input('Token: ')
price = PRICE or input('Price: ')
lifetime = LIFETIME or input('Demand lifetime: ')
deadline = str(rospy.ServiceProxy('/eth/current_block', BlockNumber)().number + int(lifetime))
rospy.loginfo('Making demand...')
# Demand message consists of the following fields
demand = Demand()
demand.model = Multihash()
demand.model.multihash = model
demand.objective = Multihash()
demand.objective.multihash = objective
demand.lighthouse = Address()
demand.lighthouse.address = '0xD40AC7F1e5401e03D00F5aeC1779D8e5Af4CF9f1'
demand.token = Address()
demand.token.address = token
demand.cost = UInt256()
demand.cost.uint256 = str(price)
demand.validatorFee = UInt256()
demand.validatorFee.uint256 = '0'
demand.validator = Address()
demand.validator.address = '0x0000000000000000000000000000000000000000'
demand.deadline = UInt256()
demand.deadline.uint256 = deadline
# We ask robonomics_comm to publish the demand message by publishing the message to the ros topic
signing_demand.publish(demand)
rospy.loginfo(demand)
rospy.loginfo('Complete.')
| 34.265823
| 102
| 0.741042
|
4344a2e6705a9f4ab54a1bf82b3ffe121974c2fd
| 2,549
|
py
|
Python
|
andes/formats/txt.py
|
rwl/Andes
|
6818dcc86bd7e1a5028b5be1deee3d6b4171c540
|
[
"Apache-2.0"
] | 1
|
2018-01-11T22:58:17.000Z
|
2018-01-11T22:58:17.000Z
|
andes/formats/txt.py
|
rwl/Andes
|
6818dcc86bd7e1a5028b5be1deee3d6b4171c540
|
[
"Apache-2.0"
] | null | null | null |
andes/formats/txt.py
|
rwl/Andes
|
6818dcc86bd7e1a5028b5be1deee3d6b4171c540
|
[
"Apache-2.0"
] | null | null | null |
from andes.utils.tab import simpletab
from cvxopt import matrix
def format_newline():
return '\n'
def format_title(item):
return item
def format_item(item, val):
return '{:20s} {:s}'.format(item, str(val))
def format_table(header, data, title=None):
# fmt = ".4g"
# return tabulate(data, headers=header, floatfmt=fmt)
table = simpletab(data=data, header=header)
return table.draw()
# return []
# pass
def dump_data(text, header, rowname, data, file):
width = 14
precision = 4
s = ''
out = ''
fid = open(file, 'w')
for Text, Header, Rowname, Data in zip(text, header, rowname, data):
# Write Text
if Text:
fid.writelines(
Text
)
# Write Header
if Header:
ncol = len(Header)
s = ' ' * width
s += '{:>{width}s}' * ncol + '\n'
fid.writelines(s.format(*Header, width=width)) # Mind the asterisk
fid.write('\n')
# Append Rowname to Data
# Data is a list of column lists
if Rowname:
ncol = 0
for idx, item in enumerate(Rowname): # write by row as always
if not Data:
out = ''
elif isinstance(Data[0], list): # list of list in Data
ncol = len(Data)
out = [Data[i][idx] for i in range(ncol)]
elif isinstance(Data[0],
(int, float)): # Is just a list of numbers
ncol = 1
out = [Data[idx]]
elif isinstance(Data, (int, float)):
ncol = 1
out = [Data]
elif isinstance(Data, matrix): # Data is a matrix
pass
else:
print('Unexpected Data during output, in formats/txt.py')
s = '{:{width}s}' # for row header
for col in out:
if isinstance(col, (int, float)):
s += '{:{width}.{precision}g}'
elif type(col) == str:
if len(col) > width:
col = col[:width]
s += '{:{width}s}'
else:
pass
s += '\n'
fid.write(
s.format(
str(item), *out, width=width, precision=precision))
fid.write('\n')
fid.close()
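# Editor's addition (a hedged usage sketch, not part of andes): the argument structure
# below mirrors the zip() in dump_data above; the file name and values are made up.
if __name__ == '__main__':
    demo_text = ['Bus voltage summary\n']
    demo_header = [['V (pu)', 'Angle (deg)']]
    demo_rowname = [['Bus1', 'Bus2']]
    demo_data = [[[1.0, 0.98], [0.0, -5.2]]]  # one section; Data is a list of column lists
    dump_data(demo_text, demo_header, demo_rowname, demo_data, 'txt_demo.txt')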
| 28.965909
| 79
| 0.443703
|
933b1e14acf06dabbc8c28aa4e78dff874c4c360
| 789
|
py
|
Python
|
examples/prefix.py
|
aysobay212/discord-super-utils
|
21d97273f330ec5458eb1068f8b6ab4f4a59a713
|
[
"MIT"
] | null | null | null |
examples/prefix.py
|
aysobay212/discord-super-utils
|
21d97273f330ec5458eb1068f8b6ab4f4a59a713
|
[
"MIT"
] | null | null | null |
examples/prefix.py
|
aysobay212/discord-super-utils
|
21d97273f330ec5458eb1068f8b6ab4f4a59a713
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import discordSuperUtils
bot = commands.Bot(command_prefix="-")
PrefixManager = discordSuperUtils.PrefixManager(bot, default_prefix="-", mentioned=True)
@bot.command()
async def prefix(ctx, new_prefix):
new_prefix = new_prefix[:3] # I recommend capping the prefix length to save storage.
await PrefixManager.set_prefix(ctx.guild, new_prefix)
await ctx.send(f"Successfully changed the prefix to '{new_prefix}'")
@bot.event
async def on_ready():
database = discordSuperUtils.DatabaseManager.connect(...)
await PrefixManager.connect_to_database(database, "prefixes")
print('Prefix manager is ready.', bot.user)
@bot.command()
async def ping(ctx):
await ctx.send(f"Pong! ping is {bot.latency * 1000}ms")
bot.run("token")
| 27.206897
| 89
| 0.741445
|
93d356b6e0ca0e1f42b973156f978b2cd7288cb2
| 166
|
py
|
Python
|
articles/gsoc/final-post/__article__.py
|
jakelishman/binhbar
|
c358acdf426062174176b3700ebd9bd7f368c9cf
|
[
"MIT"
] | null | null | null |
articles/gsoc/final-post/__article__.py
|
jakelishman/binhbar
|
c358acdf426062174176b3700ebd9bd7f368c9cf
|
[
"MIT"
] | null | null | null |
articles/gsoc/final-post/__article__.py
|
jakelishman/binhbar
|
c358acdf426062174176b3700ebd9bd7f368c9cf
|
[
"MIT"
] | null | null | null |
{
"id": "ffb8a3",
"title": "QuTiP data layer and the end of Google Summer of Code 2020",
"date": "2020-08-28",
"tags": ["QuTiP", "GSoC", "python"],
}
| 23.714286
| 74
| 0.548193
|
3da24df465fa3c8a135c6a985e5f886701493313
| 1,892
|
py
|
Python
|
test/selenium/src/lib/page/widget/page_mixins.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/page/widget/page_mixins.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/selenium/src/lib/page/widget/page_mixins.py
|
j0gurt/ggrc-core
|
84662dc85aa8864c907eabe70b8efccf92298a1f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Mixins for info page objects"""
# pylint: disable=too-few-public-methods
from lib import base
from lib.element import page_elements
class WithPageElements(base.WithBrowser):
"""A mixin for page elements"""
def _related_people_list(self, label):
"""Return RelatedPeopleList page element with label `label`"""
return page_elements.RelatedPeopleList(self._browser, label)
def _related_urls(self, label):
"""Return RelatedUrls page element with label `label`"""
return page_elements.RelatedUrls(self._browser, label)
def _assessment_evidence_urls(self):
"""Return AssessmentEvidenceUrls page element"""
return page_elements.AssessmentEvidenceUrls(self._browser)
def _comment_area(self):
"""Return CommentArea page element"""
return page_elements.CommentArea(self._browser)
def _simple_field(self, label):
"""Returns SimpleField page element."""
return page_elements.SimpleField(self._browser, label)
def _info_pane_form_field(self, label):
"""Returns InfoPaneFormField page element."""
return page_elements.InfoPaneFormField(self._browser, label)
def _assessment_form_field(self, label):
"""Returns AssessmentFormField page element."""
return page_elements.AssessmentFormField(self._browser, label)
class WithAssignFolder(base.WithBrowser):
"""A mixin for `Assign Folder`"""
def __init__(self, driver):
super(WithAssignFolder, self).__init__(driver)
self.assign_folder_button = self._browser.element(
class_name="mapped-folder__add-button")
class WithObjectReview(base.WithBrowser):
"""A mixin for object reviews"""
def __init__(self, driver):
super(WithObjectReview, self).__init__(driver)
self.request_review_btn = self._browser.button(text="Request Review")
| 33.192982
| 78
| 0.752114
|
c4ba2a014e59b48a6f302bd79125d0707eda54d2
| 2,221
|
py
|
Python
|
day-11/octopus.py
|
PeterPrice06/advent-of-code-2021
|
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
|
[
"MIT"
] | null | null | null |
day-11/octopus.py
|
PeterPrice06/advent-of-code-2021
|
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
|
[
"MIT"
] | null | null | null |
day-11/octopus.py
|
PeterPrice06/advent-of-code-2021
|
d1af22ee2e4778372e626debca1ae9dc7f2ad47c
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
class DumboOctopi():
def __init__(self, line_strs: List[str]) -> None:
        self.octopi_energy = [[int(char) for char in line] for line in line_strs]
def tick(self) -> int:
flashes = 0
# self.increase_energy()
flashes = self.increase_energy_all()
self.exhaust_energy()
return flashes
def increase_energy(self) -> None:
for row in range(len(self.octopi_energy)):
for col in range(len(self.octopi_energy[row])):
self.octopi_energy[row][col] += 1
def increase_energy_all(self) -> int:
flashes = 0
for row in range(len(self.octopi_energy)):
for col in range(len(self.octopi_energy[row])):
flashes += self.flash(row, col)
return flashes
def flash(self, row: int, col: int) -> int:
if not self.in_bounds(row, col):
return 0
self.octopi_energy[row][col] += 1
if self.octopi_energy[row][col] == 9 + 1:
flashes = 1
flashes += self.flash(row - 1, col)
flashes += self.flash(row, col - 1)
flashes += self.flash(row + 1, col)
flashes += self.flash(row, col + 1)
flashes += self.flash(row - 1, col - 1)
flashes += self.flash(row + 1, col - 1)
flashes += self.flash(row + 1, col + 1)
flashes += self.flash(row - 1, col + 1)
return flashes
return 0
def exhaust_energy(self) -> None:
for row in range(len(self.octopi_energy)):
for col in range(len(self.octopi_energy[row])):
if self.octopi_energy[row][col] > 9:
self.octopi_energy[row][col] = 0
def in_bounds(self, row: int, col: int) -> bool:
if row >= 0 and row < len(self.octopi_energy) and col >= 0 and col < len(self.octopi_energy[row]):
return True
else:
return False
def __str__(self) -> str:
# print the octopi energy levels with a newline after each row and no space between columns
return '\n'.join([''.join([str(char) for char in row]) for row in self.octopi_energy])
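# Editor's addition (a minimal driver sketch, not part of the original solution): run
# 100 steps and count flashes, Advent of Code day 11 part 1 style. The input file name
# is an assumption.
if __name__ == '__main__':
    with open('input.txt') as f:
        lines = [line.strip() for line in f if line.strip()]
    octopi = DumboOctopi(lines)
    total_flashes = sum(octopi.tick() for _ in range(100))
    print(f'Flashes after 100 steps: {total_flashes}')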
| 37.016667
| 106
| 0.561459
|
85315c0bf7bf9ccc7270acd6abb867f00126f666
| 264
|
py
|
Python
|
seguridad/helpers.py
|
luisfarfan/cpv_seguridad
|
3b499057251894b27a13c64efac38c347d5796b0
|
[
"MIT"
] | 1
|
2016-09-20T22:45:06.000Z
|
2016-09-20T22:45:06.000Z
|
seguridad/helpers.py
|
luisfarfan/cpv_seguridad
|
3b499057251894b27a13c64efac38c347d5796b0
|
[
"MIT"
] | null | null | null |
seguridad/helpers.py
|
luisfarfan/cpv_seguridad
|
3b499057251894b27a13c64efac38c347d5796b0
|
[
"MIT"
] | null | null | null |
from datetime import datetime
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
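# Editor's example (illustrative only): json_serial is intended as the `default` hook
# for json.dumps so datetime values serialize to ISO-8601 strings.
if __name__ == '__main__':
    import json
    print(json.dumps({'created': datetime(2016, 9, 20, 22, 45)}, default=json_serial))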
| 26.4
| 75
| 0.700758
|
8147fdf7bb6b61270daf60177fa4b41e2b04d3af
| 5,089
|
py
|
Python
|
tests/unit/test_analyzer_scores.py
|
LaudateCorpus1/quakestats
|
d4e44d593e6c7334628d34b5ec648ade5976003e
|
[
"MIT"
] | 21
|
2018-04-24T09:33:01.000Z
|
2022-03-05T10:53:45.000Z
|
tests/unit/test_analyzer_scores.py
|
brabiega/quakestats
|
1628720350a1e4e40ebebdb7988785663892f0be
|
[
"MIT"
] | 42
|
2018-04-13T18:09:19.000Z
|
2021-08-05T20:23:22.000Z
|
tests/unit/test_analyzer_scores.py
|
LaudateCorpus1/quakestats
|
d4e44d593e6c7334628d34b5ec648ade5976003e
|
[
"MIT"
] | 8
|
2018-06-12T18:07:39.000Z
|
2021-08-28T02:26:17.000Z
|
from quakestats.dataprovider.analyzer.events import Event
from quakestats.dataprovider.analyzer.scores import PlayerScores
def gen_switch_team(time, player_id, old_team, new_team):
return Event.from_dict({
'TYPE': 'PLAYER_SWITCHTEAM',
'DATA': {
'TIME': time,
'KILLER': {
'STEAM_ID': player_id,
'OLD_TEAM': old_team,
'TEAM': new_team,
}
}
})
def gen_kill(time, killer_id, victim_id, mod):
return Event.from_dict({
'TYPE': 'PLAYER_KILL',
'DATA': {
'TIME': time,
'VICTIM': {
'STEAM_ID': victim_id,
},
'KILLER': {
'STEAM_ID': killer_id,
},
'MOD': mod,
},
})
def gen_death(time, killer_id, victim_id, mod):
return Event.from_dict({
'TYPE': 'PLAYER_DEATH',
'DATA': {
'TIME': time,
'VICTIM': {
'STEAM_ID': victim_id,
},
'KILLER': {
'STEAM_ID': killer_id,
},
'MOD': mod,
},
})
def gen_disconnect(time, player_id):
return Event.from_dict({
'TYPE': 'PLAYER_DISCONNECT',
'DATA': {'TIME': time, 'STEAM_ID': player_id},
})
class TestPlayerScores():
def test_from_player_kill(self):
ps = PlayerScores()
assert ps.kdr['A'].r == 0
ps.from_player_kill(gen_kill(1, 'A', 'B', 'SHOTGUN'))
assert len(ps.scores) == 1
assert ps.player_score['A'] == [1, 1]
assert ps.scores[0] == (1, 'A', 1, 'SHOTGUN')
assert len(ps.kills) == 1
assert ps.kills[0] == (1, 'A', 'B', 'SHOTGUN')
assert ps.kdr['A'].r == 1
ps.from_player_kill(gen_kill(2, 'A', 'C', 'MOD3'))
assert len(ps.scores) == 2
assert ps.player_score['A'] == [2, 2]
assert ps.scores[1] == (2, 'A', 2, 'MOD3')
assert len(ps.kills) == 2
assert ps.kills[1] == (2, 'A', 'C', 'MOD3')
assert ps.kdr['A'].r == 2
ps.from_player_kill(gen_kill(2, 'B', 'A', 'MOD3'))
assert len(ps.scores) == 3
assert ps.player_score['B'] == [1, 2]
assert ps.scores[2] == (2, 'B', 1, 'MOD3')
assert len(ps.kills) == 3
assert ps.kills[2] == (2, 'B', 'A', 'MOD3')
def test_from_player_kill_selfkill(self):
ps = PlayerScores()
ps.from_player_kill(gen_kill(2, 'A', 'A', 'SHOTGUN'))
assert ps.player_score['A'] == [0, 0]
assert ps.kills == [(2, 'A', 'A', 'SHOTGUN')]
assert ps.scores == []
def test_from_player_swtichteam(self):
ps = PlayerScores()
ps.player_score['A'] = [10, 0]
ps.from_player_switchteam(gen_switch_team(3, 'A', 'Free', 'Spect'))
assert ps.scores[0] == (3, 'A', 0, 'SWITCHTEAM')
        assert ps.player_score['A'] == [0, 0]
def test_from_player_disconnect(self):
ps = PlayerScores()
ps.match_duration = 100
ps.player_score['A'] = [10, 0]
ps.from_player_disconnect(gen_disconnect(3, 'A'))
assert ps.scores[0] == (3, 'A', 0, 'DISCONNECTED')
assert ps.player_score['A'] == [0, 0]
ps.player_score['A'] = [10, 1]
ps.from_player_disconnect(gen_disconnect(300, 'A'))
assert ps.player_score['A'] == [10, 1]
def test_from_player_death_world(self):
ps = PlayerScores()
ps.from_player_death(gen_death(3, 'q3-world', 'B', 'FALLING'))
assert ps.scores[0] == (3, 'B', -1, 'FALLING')
assert ps.player_score['B'] == [-1, 0]
assert ps.deaths[0] == (3, 'q3-world', 'B', 'FALLING')
def test_from_player_death_selfkill(self):
ps = PlayerScores()
ps.from_player_death(gen_death(3, 'B', 'B', 'FALLING'))
assert ps.scores[0] == (3, 'B', -1, 'FALLING')
assert ps.player_score['B'] == [-1, 0]
assert ps.deaths[0] == (3, 'B', 'B', 'FALLING')
def test_from_player_death(self):
ps = PlayerScores()
ps.from_player_death(gen_death(3, 'A', 'B', 'MOD_ROCKET'))
assert ps.scores == []
assert ps.player_score['B'] == [0, 0]
assert ps.deaths[0] == (3, 'A', 'B', 'MOD_ROCKET')
def test_players_sorted_by_score(self):
ps = PlayerScores()
ps.match_duration = 900
ps.from_player_kill(gen_kill(1, 'A', 'B', 'SHOTGUN'))
ps.from_player_kill(gen_kill(2, 'A', 'B', 'SHOTGUN'))
ps.from_player_kill(gen_kill(1, 'B', 'C', 'SHOTGUN'))
assert ps.players_sorted_by_score() == ['A', 'B']
ps.from_player_kill(gen_kill(3, 'B', 'C', 'SHOTGUN'))
assert ps.players_sorted_by_score() == ['B', 'A']
ps.from_player_kill(gen_kill(4, 'A', 'B', 'SHOTGUN'))
assert ps.players_sorted_by_score() == ['A', 'B']
ps.from_player_disconnect(gen_disconnect(10, 'A'))
assert ps.players_sorted_by_score() == ['B']
ps.from_player_switchteam(gen_switch_team(10, 'B', 'Free', 'Spect'))
assert ps.players_sorted_by_score() == []
| 31.80625
| 76
| 0.531342
|
dee63077fe49dcabece3299c87d3966ca2089b1b
| 656
|
py
|
Python
|
src/unicon/plugins/dell/settings.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 18
|
2019-11-23T23:14:53.000Z
|
2022-01-10T01:17:08.000Z
|
src/unicon/plugins/dell/settings.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 12
|
2020-11-09T20:39:25.000Z
|
2022-03-22T12:46:59.000Z
|
src/unicon/plugins/dell/settings.py
|
TestingBytes/unicon.plugins
|
0600956d805deb4fd790aa3ef591c5d659e85de1
|
[
"Apache-2.0"
] | 32
|
2020-02-12T15:42:22.000Z
|
2022-03-15T16:42:10.000Z
|
'''
Author: Knox Hutchinson
Contact: https://dataknox.dev
https://twitter.com/data_knox
https://youtube.com/c/dataknox
Contents largely inspired by sample Unicon repo:
https://github.com/CiscoDevNet/pyats-plugin-examples/tree/master/unicon_plugin_example/src/unicon_plugin_example
'''
from unicon.plugins.generic.settings import GenericSettings
class DellSettings(GenericSettings):
def __init__(self):
# inherit any parent settings
super().__init__()
self.CONNECTION_TIMEOUT = 60*5
self.ESCAPE_CHAR_CALLBACK_PRE_SENDLINE_PAUSE_SEC = 1
self.HA_INIT_EXEC_COMMANDS = []
self.HA_INIT_CONFIG_COMMANDS = []
| 31.238095
| 112
| 0.751524
|
6e79d4555637b5d0aa4465c0826004509f35422c
| 1,048
|
py
|
Python
|
tests/formatters/winrestore.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | 27
|
2019-04-05T12:01:49.000Z
|
2022-02-08T02:26:25.000Z
|
tests/formatters/winrestore.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | null | null | null |
tests/formatters/winrestore.py
|
berggren/plaso
|
2658c80c5076f97a9a27272e73997bde8c39e875
|
[
"Apache-2.0"
] | 8
|
2019-11-28T08:06:34.000Z
|
2020-08-29T13:53:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows Restore Point (rp.log) file event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import winrestore
from tests.formatters import test_lib
class RestorePointInfoFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Windows Restore Point information event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = winrestore.RestorePointInfoFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = winrestore.RestorePointInfoFormatter()
expected_attribute_names = [
'description',
'restore_point_event_type',
'restore_point_type']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
| 26.871795
| 72
| 0.754771
|
45f51eef2fc4253b1259923930160db74a331b40
| 7,819
|
py
|
Python
|
tests/regressiontests/forms/tests/regressions.py
|
kix/django
|
5262a288df07daa050a0e17669c3f103f47a8640
|
[
"BSD-3-Clause"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
AppServer/lib/django-1.5/tests/regressiontests/forms/tests/regressions.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
AppServer/lib/django-1.5/tests/regressiontests/forms/tests/regressions.py
|
nlake44/appscale
|
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
|
[
"Apache-2.0"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from warnings import catch_warnings
from django.forms import *
from django.test import TestCase
from django.utils.translation import ugettext_lazy, override
from regressiontests.forms.models import Cheese
class FormsRegressionsTestCase(TestCase):
def test_class(self):
# Tests to prevent against recurrences of earlier bugs.
extra_attrs = {'class': 'special'}
class TestForm(Form):
f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
f2 = CharField(widget=TextInput(attrs=extra_attrs))
self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')
def test_regression_3600(self):
# Tests for form i18n #
# There were some problems with form translations in #3600
class SomeForm(Form):
username = CharField(max_length=10, label=ugettext_lazy('Username'))
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        # Translations are done at rendering time, so multi-lingual apps can define forms
with override('de'):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
with override('pl', deactivate=True):
self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Nazwa u\u017cytkownika:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
def test_regression_5216(self):
# There was some problems with form translations in #5216
class SomeForm(Form):
field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))
f = SomeForm()
self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1</label>')
self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2</label>')
# Unicode decoding problems...
GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))
class SomeForm(Form):
somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')
f = SomeForm()
self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul>\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
# Testing choice validation with UTF-8 bytestrings as input (these are the
# Russian abbreviations "мес." and "шт.".
UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
(b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
f = ChoiceField(choices=UNITS)
self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
with catch_warnings(record=True):
# Ignore UnicodeWarning
self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')
# Translated error messages used to be buggy.
with override('ru'):
f = SomeForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul>\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')
        # Deep copying translated text shouldn't raise an error
from django.utils.translation import gettext_lazy
class CopyForm(Form):
degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))
f = CopyForm()
def test_misc(self):
# There once was a problem with Form fields called "data". Let's make sure that
# doesn't come back.
class DataForm(Form):
data = CharField(max_length=10)
f = DataForm({'data': 'xyzzy'})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})
# A form with *only* hidden fields that has errors is going to be very unusual.
class HiddenForm(Form):
data = IntegerField(widget=HiddenInput)
f = HiddenForm({})
self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')
def test_xss_error_messages(self):
###################################################
# Tests for XSS vulnerabilities in error messages #
###################################################
# The forms layer doesn't escape input values directly because error messages
# might be presented in non-HTML contexts. Instead, the message is just marked
# for escaping by the template engine. So we'll need to construct a little
# silly template to trigger the escaping.
from django.template import Template, Context
t = Template('{{ form.errors }}')
class SomeForm(Form):
field = ChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': '<script>'})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
class SomeForm(Form):
field = MultipleChoiceField(choices=[('one', 'One')])
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')
from regressiontests.forms.models import ChoiceModel
class SomeForm(Form):
field = ModelMultipleChoiceField(ChoiceModel.objects.all())
f = SomeForm({'field': ['<script>']})
self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')
def test_regression_14234(self):
"""
Re-cleaning an instance that was added via a ModelForm should not raise
a pk uniqueness error.
"""
class CheeseForm(ModelForm):
class Meta:
model = Cheese
form = CheeseForm({
'name': 'Brie',
})
self.assertTrue(form.is_valid())
obj = form.save()
obj.name = 'Camembert'
obj.full_clean()
| 51.781457
| 641
| 0.628213
|
d282f5d277f915751b7b8aa8f57a75c0086f76c0
| 32,983
|
py
|
Python
|
deephistopath/wsi/slide.py
|
revsearchproject/deep-histopath
|
03ad6ce172e21fcb3e0f572f8b46031fbcc31b4a
|
[
"Apache-2.0"
] | null | null | null |
deephistopath/wsi/slide.py
|
revsearchproject/deep-histopath
|
03ad6ce172e21fcb3e0f572f8b46031fbcc31b4a
|
[
"Apache-2.0"
] | null | null | null |
deephistopath/wsi/slide.py
|
revsearchproject/deep-histopath
|
03ad6ce172e21fcb3e0f572f8b46031fbcc31b4a
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
import glob
import math
import matplotlib.pyplot as plt
import multiprocessing
import numpy as np
import openslide
from openslide import OpenSlideError
import os
import PIL
from PIL import Image
import re
import sys
from deephistopath.wsi import util
from deephistopath.wsi.util import Time
BASE_DIR = os.path.join(".", "data")
# BASE_DIR = os.path.join(os.sep, "Volumes", "BigData", "TUPAC")
TRAIN_PREFIX = "CN01-"
SRC_TRAIN_DIR = os.path.join(BASE_DIR, "training_slides")
SRC_TRAIN_EXT = "svs"
DEST_TRAIN_SUFFIX = "" # Example: "train-"
DEST_TRAIN_EXT = "png"
SCALE_FACTOR = 32
DEST_TRAIN_DIR = os.path.join(BASE_DIR, "training_" + DEST_TRAIN_EXT)
THUMBNAIL_SIZE = 300
THUMBNAIL_EXT = "jpg"
DEST_TRAIN_THUMBNAIL_DIR = os.path.join(BASE_DIR, "training_thumbnail_" + THUMBNAIL_EXT)
FILTER_SUFFIX = "" # Example: "filter-"
FILTER_RESULT_TEXT = "filtered"
FILTER_DIR = os.path.join(BASE_DIR, "filter_" + DEST_TRAIN_EXT)
FILTER_THUMBNAIL_DIR = os.path.join(BASE_DIR, "filter_thumbnail_" + THUMBNAIL_EXT)
FILTER_PAGINATION_SIZE = 50
FILTER_PAGINATE = True
FILTER_HTML_DIR = BASE_DIR
TILE_SUMMARY_DIR = os.path.join(BASE_DIR, "tile_summary_" + DEST_TRAIN_EXT)
TILE_SUMMARY_ON_ORIGINAL_DIR = os.path.join(BASE_DIR, "tile_summary_on_original_" + DEST_TRAIN_EXT)
TILE_SUMMARY_SUFFIX = "tile_summary"
TILE_SUMMARY_THUMBNAIL_DIR = os.path.join(BASE_DIR, "tile_summary_thumbnail_" + THUMBNAIL_EXT)
TILE_SUMMARY_ON_ORIGINAL_THUMBNAIL_DIR = os.path.join(BASE_DIR, "tile_summary_on_original_thumbnail_" + THUMBNAIL_EXT)
TILE_SUMMARY_PAGINATION_SIZE = 50
TILE_SUMMARY_PAGINATE = True
TILE_SUMMARY_HTML_DIR = BASE_DIR
TILE_DATA_DIR = os.path.join(BASE_DIR, "tile_data")
TILE_DATA_SUFFIX = "tile_data"
TOP_TILES_SUFFIX = "top_tile_summary"
TOP_TILES_DIR = os.path.join(BASE_DIR, TOP_TILES_SUFFIX + "_" + DEST_TRAIN_EXT)
TOP_TILES_THUMBNAIL_DIR = os.path.join(BASE_DIR, TOP_TILES_SUFFIX + "_thumbnail_" + THUMBNAIL_EXT)
TOP_TILES_ON_ORIGINAL_DIR = os.path.join(BASE_DIR, TOP_TILES_SUFFIX + "_on_original_" + DEST_TRAIN_EXT)
TOP_TILES_ON_ORIGINAL_THUMBNAIL_DIR = os.path.join(BASE_DIR,
TOP_TILES_SUFFIX + "_on_original_thumbnail_" + THUMBNAIL_EXT)
TILE_DIR = os.path.join(BASE_DIR, "tiles_" + DEST_TRAIN_EXT)
TILE_SUFFIX = "tile"
STATS_DIR = os.path.join(BASE_DIR, "svs_stats")
def open_slide(filename):
"""
Open a whole-slide image (*.svs, etc).
Args:
filename: Name of the slide file.
Returns:
An OpenSlide object representing a whole-slide image.
"""
try:
slide = openslide.open_slide(filename)
except OpenSlideError:
slide = None
except FileNotFoundError:
slide = None
return slide
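def print_slide_info(slide_number):
  """
  Editor's sketch (hedged, not part of the original module): print basic properties of
  a whole-slide image opened with open_slide. The OpenSlide attributes used here
  (dimensions, level_count) are standard openslide properties; the helper itself is
  only illustrative.
  Args:
    slide_number: The slide number.
  """
  slide = open_slide(get_training_slide_path(slide_number))
  if slide is not None:
    print("Slide %d dimensions: %s, levels: %d" % (
      slide_number, str(slide.dimensions), slide.level_count))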
def open_image(filename):
"""
Open an image (*.jpg, *.png, etc).
Args:
filename: Name of the image file.
returns:
A PIL.Image.Image object representing an image.
"""
image = Image.open(filename)
return image
def open_image_np(filename):
"""
Open an image (*.jpg, *.png, etc) as an RGB NumPy array.
Args:
filename: Name of the image file.
returns:
A NumPy representing an RGB image.
"""
pil_img = open_image(filename)
np_img = util.pil_to_np_rgb(pil_img)
return np_img
def get_training_slide_path(slide_number):
"""
Convert slide number to a path to the corresponding WSI training slide file.
Example:
5 -> ../data/training_slides/TUPAC-TR-005.svs
Args:
slide_number: The slide number.
Returns:
Path to the WSI training slide file.
"""
padded_sl_num = str(slide_number).zfill(3)
slide_filepath = os.path.join(SRC_TRAIN_DIR, TRAIN_PREFIX + padded_sl_num + "." + SRC_TRAIN_EXT)
return slide_filepath
def get_tile_image_path(tile):
"""
Obtain tile image path based on tile information such as row, column, row pixel position, column pixel position,
pixel width, and pixel height.
Args:
tile: Tile object.
Returns:
Path to image tile.
"""
t = tile
padded_sl_num = str(t.slide_num).zfill(3)
tile_path = os.path.join(TILE_DIR, padded_sl_num,
TRAIN_PREFIX + padded_sl_num + "-" + TILE_SUFFIX + "-r%d-c%d-x%d-y%d-w%d-h%d" % (
t.r, t.c, t.o_c_s, t.o_r_s, t.o_c_e - t.o_c_s, t.o_r_e - t.o_r_s) + "." + DEST_TRAIN_EXT)
return tile_path
def get_tile_image_path_by_slide_row_col(slide_number, row, col):
"""
Obtain tile image path using wildcard lookup with slide number, row, and column.
Args:
slide_number: The slide number.
row: The row.
col: The column.
Returns:
Path to image tile.
"""
padded_sl_num = str(slide_number).zfill(3)
  wildcard_path = os.path.join(TILE_DIR, padded_sl_num,
TRAIN_PREFIX + padded_sl_num + "-" + TILE_SUFFIX + "-r%d-c%d-*." % (
row, col) + DEST_TRAIN_EXT)
  img_path = glob.glob(wildcard_path)[0]
return img_path
def get_training_image_path(slide_number, large_w=None, large_h=None, small_w=None, small_h=None):
"""
Convert slide number and optional dimensions to a training image path. If no dimensions are supplied,
the corresponding file based on the slide number will be looked up in the file system using a wildcard.
Example:
5 -> ../data/training_png/TUPAC-TR-005-32x-49920x108288-1560x3384.png
Args:
slide_number: The slide number.
large_w: Large image width.
large_h: Large image height.
small_w: Small image width.
small_h: Small image height.
Returns:
Path to the image file.
"""
padded_sl_num = str(slide_number).zfill(3)
if large_w is None and large_h is None and small_w is None and small_h is None:
wildcard_path = os.path.join(DEST_TRAIN_DIR, TRAIN_PREFIX + padded_sl_num + "*." + DEST_TRAIN_EXT)
img_path = glob.glob(wildcard_path)[0]
else:
img_path = os.path.join(DEST_TRAIN_DIR, TRAIN_PREFIX + padded_sl_num + "-" + str(
SCALE_FACTOR) + "x-" + DEST_TRAIN_SUFFIX + str(
large_w) + "x" + str(large_h) + "-" + str(small_w) + "x" + str(small_h) + "." + DEST_TRAIN_EXT)
return img_path
def get_training_thumbnail_path(slide_number, large_w=None, large_h=None, small_w=None, small_h=None):
"""
Convert slide number and optional dimensions to a training thumbnail path. If no dimensions are
supplied, the corresponding file based on the slide number will be looked up in the file system using a wildcard.
Example:
5 -> ../data/training_thumbnail_jpg/TUPAC-TR-005-32x-49920x108288-1560x3384.jpg
Args:
slide_number: The slide number.
large_w: Large image width.
large_h: Large image height.
small_w: Small image width.
small_h: Small image height.
Returns:
Path to the thumbnail file.
"""
padded_sl_num = str(slide_number).zfill(3)
if large_w is None and large_h is None and small_w is None and small_h is None:
    wildcard_path = os.path.join(DEST_TRAIN_THUMBNAIL_DIR, TRAIN_PREFIX + padded_sl_num + "*." + THUMBNAIL_EXT)
    img_path = glob.glob(wildcard_path)[0]
else:
img_path = os.path.join(DEST_TRAIN_THUMBNAIL_DIR, TRAIN_PREFIX + padded_sl_num + "-" + str(
SCALE_FACTOR) + "x-" + DEST_TRAIN_SUFFIX + str(
large_w) + "x" + str(large_h) + "-" + str(small_w) + "x" + str(small_h) + "." + THUMBNAIL_EXT)
return img_path
def get_filter_image_path(slide_number, filter_number, filter_name_info):
"""
Convert slide number, filter number, and text to a path to a filter image file.
Example:
5, 1, "rgb" -> ../data/filter_png/TUPAC-TR-005-001-rgb.png
Args:
slide_number: The slide number.
filter_number: The filter number.
filter_name_info: Descriptive text describing filter.
Returns:
Path to the filter image file.
"""
dir = FILTER_DIR
if not os.path.exists(dir):
os.makedirs(dir)
img_path = os.path.join(dir, get_filter_image_filename(slide_number, filter_number, filter_name_info))
return img_path
def get_filter_thumbnail_path(slide_number, filter_number, filter_name_info):
"""
Convert slide number, filter number, and text to a path to a filter thumbnail file.
Example:
5, 1, "rgb" -> ../data/filter_thumbnail_jpg/TUPAC-TR-005-001-rgb.jpg
Args:
slide_number: The slide number.
filter_number: The filter number.
filter_name_info: Descriptive text describing filter.
Returns:
Path to the filter thumbnail file.
"""
dir = FILTER_THUMBNAIL_DIR
if not os.path.exists(dir):
os.makedirs(dir)
img_path = os.path.join(dir, get_filter_image_filename(slide_number, filter_number, filter_name_info, thumbnail=True))
return img_path
def get_filter_image_filename(slide_number, filter_number, filter_name_info, thumbnail=False):
"""
Convert slide number, filter number, and text to a filter file name.
Example:
5, 1, "rgb", False -> TUPAC-TR-005-001-rgb.png
5, 1, "rgb", True -> TUPAC-TR-005-001-rgb.jpg
Args:
slide_number: The slide number.
filter_number: The filter number.
filter_name_info: Descriptive text describing filter.
thumbnail: If True, produce thumbnail filename.
Returns:
The filter image or thumbnail file name.
"""
if thumbnail:
ext = THUMBNAIL_EXT
else:
ext = DEST_TRAIN_EXT
padded_sl_num = str(slide_number).zfill(3)
padded_fi_num = str(filter_number).zfill(3)
img_filename = TRAIN_PREFIX + padded_sl_num + "-" + padded_fi_num + "-" + FILTER_SUFFIX + filter_name_info + "." + ext
return img_filename
def get_tile_summary_image_path(slide_number):
"""
Convert slide number to a path to a tile summary image file.
Example:
5 -> ../data/tile_summary_png/TUPAC-TR-005-tile_summary.png
Args:
slide_number: The slide number.
Returns:
Path to the tile summary image file.
"""
if not os.path.exists(TILE_SUMMARY_DIR):
os.makedirs(TILE_SUMMARY_DIR)
img_path = os.path.join(TILE_SUMMARY_DIR, get_tile_summary_image_filename(slide_number))
return img_path
def get_tile_summary_thumbnail_path(slide_number):
"""
Convert slide number to a path to a tile summary thumbnail file.
Example:
5 -> ../data/tile_summary_thumbnail_jpg/TUPAC-TR-005-tile_summary.jpg
Args:
slide_number: The slide number.
Returns:
Path to the tile summary thumbnail file.
"""
if not os.path.exists(TILE_SUMMARY_THUMBNAIL_DIR):
os.makedirs(TILE_SUMMARY_THUMBNAIL_DIR)
img_path = os.path.join(TILE_SUMMARY_THUMBNAIL_DIR, get_tile_summary_image_filename(slide_number, thumbnail=True))
return img_path
def get_tile_summary_on_original_image_path(slide_number):
"""
Convert slide number to a path to a tile summary on original image file.
Example:
5 -> ../data/tile_summary_on_original_png/TUPAC-TR-005-tile_summary.png
Args:
slide_number: The slide number.
Returns:
Path to the tile summary on original image file.
"""
if not os.path.exists(TILE_SUMMARY_ON_ORIGINAL_DIR):
os.makedirs(TILE_SUMMARY_ON_ORIGINAL_DIR)
img_path = os.path.join(TILE_SUMMARY_ON_ORIGINAL_DIR, get_tile_summary_image_filename(slide_number))
return img_path
def get_tile_summary_on_original_thumbnail_path(slide_number):
"""
Convert slide number to a path to a tile summary on original thumbnail file.
Example:
5 -> ../data/tile_summary_on_original_thumbnail_jpg/TUPAC-TR-005-tile_summary.jpg
Args:
slide_number: The slide number.
Returns:
Path to the tile summary on original thumbnail file.
"""
if not os.path.exists(TILE_SUMMARY_ON_ORIGINAL_THUMBNAIL_DIR):
os.makedirs(TILE_SUMMARY_ON_ORIGINAL_THUMBNAIL_DIR)
img_path = os.path.join(TILE_SUMMARY_ON_ORIGINAL_THUMBNAIL_DIR,
get_tile_summary_image_filename(slide_number, thumbnail=True))
return img_path
def get_top_tiles_on_original_image_path(slide_number):
"""
Convert slide number to a path to a top tiles on original image file.
Example:
5 -> ../data/top_tiles_on_original_png/TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.png
Args:
slide_number: The slide number.
Returns:
Path to the top tiles on original image file.
"""
if not os.path.exists(TOP_TILES_ON_ORIGINAL_DIR):
os.makedirs(TOP_TILES_ON_ORIGINAL_DIR)
img_path = os.path.join(TOP_TILES_ON_ORIGINAL_DIR, get_top_tiles_image_filename(slide_number))
return img_path
def get_top_tiles_on_original_thumbnail_path(slide_number):
"""
Convert slide number to a path to a top tiles on original thumbnail file.
Example:
5 -> ../data/top_tiles_on_original_thumbnail_jpg/TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.jpg
Args:
slide_number: The slide number.
Returns:
Path to the top tiles on original thumbnail file.
"""
if not os.path.exists(TOP_TILES_ON_ORIGINAL_THUMBNAIL_DIR):
os.makedirs(TOP_TILES_ON_ORIGINAL_THUMBNAIL_DIR)
img_path = os.path.join(TOP_TILES_ON_ORIGINAL_THUMBNAIL_DIR,
get_top_tiles_image_filename(slide_number, thumbnail=True))
return img_path
def get_tile_summary_image_filename(slide_number, thumbnail=False):
"""
Convert slide number to a tile summary image file name.
Example:
5, False -> TUPAC-TR-005-tile_summary.png
5, True -> TUPAC-TR-005-tile_summary.jpg
Args:
slide_number: The slide number.
thumbnail: If True, produce thumbnail filename.
Returns:
The tile summary image file name.
"""
if thumbnail:
ext = THUMBNAIL_EXT
else:
ext = DEST_TRAIN_EXT
padded_sl_num = str(slide_number).zfill(3)
training_img_path = get_training_image_path(slide_number)
large_w, large_h, small_w, small_h = parse_dimensions_from_image_filename(training_img_path)
img_filename = TRAIN_PREFIX + padded_sl_num + "-" + str(SCALE_FACTOR) + "x-" + str(large_w) + "x" + str(
large_h) + "-" + str(small_w) + "x" + str(small_h) + "-" + TILE_SUMMARY_SUFFIX + "." + ext
return img_filename
def get_top_tiles_image_filename(slide_number, thumbnail=False):
"""
Convert slide number to a top tiles image file name.
Example:
5, False -> TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.png
5, True -> TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.jpg
Args:
slide_number: The slide number.
thumbnail: If True, produce thumbnail filename.
Returns:
The top tiles image file name.
"""
if thumbnail:
ext = THUMBNAIL_EXT
else:
ext = DEST_TRAIN_EXT
padded_sl_num = str(slide_number).zfill(3)
training_img_path = get_training_image_path(slide_number)
large_w, large_h, small_w, small_h = parse_dimensions_from_image_filename(training_img_path)
img_filename = TRAIN_PREFIX + padded_sl_num + "-" + str(SCALE_FACTOR) + "x-" + str(large_w) + "x" + str(
large_h) + "-" + str(small_w) + "x" + str(small_h) + "-" + TOP_TILES_SUFFIX + "." + ext
return img_filename
def get_top_tiles_image_path(slide_number):
"""
Convert slide number to a path to a top tiles image file.
Example:
5 -> ../data/top_tiles_png/TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.png
Args:
slide_number: The slide number.
Returns:
Path to the top tiles image file.
"""
if not os.path.exists(TOP_TILES_DIR):
os.makedirs(TOP_TILES_DIR)
img_path = os.path.join(TOP_TILES_DIR, get_top_tiles_image_filename(slide_number))
return img_path
def get_top_tiles_thumbnail_path(slide_number):
"""
Convert slide number to a path to a tile summary thumbnail file.
Example:
5 -> ../data/top_tiles_thumbnail_jpg/TUPAC-TR-005-32x-49920x108288-1560x3384-top_tiles.jpg
Args:
slide_number: The slide number.
Returns:
Path to the top tiles thumbnail file.
"""
if not os.path.exists(TOP_TILES_THUMBNAIL_DIR):
os.makedirs(TOP_TILES_THUMBNAIL_DIR)
img_path = os.path.join(TOP_TILES_THUMBNAIL_DIR, get_top_tiles_image_filename(slide_number, thumbnail=True))
return img_path
def get_tile_data_filename(slide_number):
"""
Convert slide number to a tile data file name.
Example:
5 -> TUPAC-TR-005-32x-49920x108288-1560x3384-tile_data.csv
Args:
slide_number: The slide number.
Returns:
The tile data file name.
"""
padded_sl_num = str(slide_number).zfill(3)
training_img_path = get_training_image_path(slide_number)
large_w, large_h, small_w, small_h = parse_dimensions_from_image_filename(training_img_path)
data_filename = TRAIN_PREFIX + padded_sl_num + "-" + str(SCALE_FACTOR) + "x-" + str(large_w) + "x" + str(
large_h) + "-" + str(small_w) + "x" + str(small_h) + "-" + TILE_DATA_SUFFIX + ".csv"
return data_filename
def get_tile_data_path(slide_number):
"""
Convert slide number to a path to a tile data file.
Example:
5 -> ../data/tile_data/TUPAC-TR-005-32x-49920x108288-1560x3384-tile_data.csv
Args:
slide_number: The slide number.
Returns:
Path to the tile data file.
"""
if not os.path.exists(TILE_DATA_DIR):
os.makedirs(TILE_DATA_DIR)
file_path = os.path.join(TILE_DATA_DIR, get_tile_data_filename(slide_number))
return file_path
def get_filter_image_result(slide_number):
"""
Convert slide number to the path to the file that is the final result of filtering.
Example:
5 -> ../data/filter_png/TUPAC-TR-005-32x-49920x108288-1560x3384-filtered.png
Args:
slide_number: The slide number.
Returns:
Path to the filter image file.
"""
padded_sl_num = str(slide_number).zfill(3)
training_img_path = get_training_image_path(slide_number)
large_w, large_h, small_w, small_h = parse_dimensions_from_image_filename(training_img_path)
img_path = os.path.join(FILTER_DIR, TRAIN_PREFIX + padded_sl_num + "-" + str(
SCALE_FACTOR) + "x-" + FILTER_SUFFIX + str(large_w) + "x" + str(large_h) + "-" + str(small_w) + "x" + str(
small_h) + "-" + FILTER_RESULT_TEXT + "." + DEST_TRAIN_EXT)
return img_path
def get_filter_thumbnail_result(slide_number):
"""
Convert slide number to the path to the file that is the final thumbnail result of filtering.
Example:
5 -> ../data/filter_thumbnail_jpg/TUPAC-TR-005-32x-49920x108288-1560x3384-filtered.jpg
Args:
slide_number: The slide number.
Returns:
Path to the filter thumbnail file.
"""
padded_sl_num = str(slide_number).zfill(3)
training_img_path = get_training_image_path(slide_number)
large_w, large_h, small_w, small_h = parse_dimensions_from_image_filename(training_img_path)
img_path = os.path.join(FILTER_THUMBNAIL_DIR, TRAIN_PREFIX + padded_sl_num + "-" + str(
SCALE_FACTOR) + "x-" + FILTER_SUFFIX + str(large_w) + "x" + str(large_h) + "-" + str(small_w) + "x" + str(
small_h) + "-" + FILTER_RESULT_TEXT + "." + THUMBNAIL_EXT)
return img_path
def parse_dimensions_from_image_filename(filename):
"""
Parse an image filename to extract the original width and height and the converted width and height.
Example:
"TUPAC-TR-011-32x-97103x79079-3034x2471-tile_summary.png" -> (97103, 79079, 3034, 2471)
Args:
filename: The image filename.
Returns:
Tuple consisting of the original width, original height, the converted width, and the converted height.
"""
m = re.match(".*-([\d]*)x([\d]*)-([\d]*)x([\d]*).*\..*", filename)
large_w = int(m.group(1))
large_h = int(m.group(2))
small_w = int(m.group(3))
small_h = int(m.group(4))
return large_w, large_h, small_w, small_h
def small_to_large_mapping(small_pixel, large_dimensions):
"""
Map a scaled-down pixel width and height to the corresponding pixel of the original whole-slide image.
Args:
small_pixel: The scaled-down width and height.
large_dimensions: The width and height of the original whole-slide image.
Returns:
Tuple consisting of the scaled-up width and height.
"""
small_x, small_y = small_pixel
large_w, large_h = large_dimensions
large_x = round((large_w / SCALE_FACTOR) / math.floor(large_w / SCALE_FACTOR) * (SCALE_FACTOR * small_x))
large_y = round((large_h / SCALE_FACTOR) / math.floor(large_h / SCALE_FACTOR) * (SCALE_FACTOR * small_y))
return large_x, large_y
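# Illustrative check of the mapping above (not part of the original source): with SCALE_FACTOR = 32
# and an original width of 49920 (an exact multiple of 32), the correction factor
# (large_w / SCALE_FACTOR) / math.floor(large_w / SCALE_FACTOR) is exactly 1, so a scaled-down x of 100
# maps back to 32 * 100 = 3200. When the original width is not an exact multiple of SCALE_FACTOR,
# the factor is slightly greater than 1 and nudges the scaled-up coordinate so that the last
# scaled-down pixel still lands inside the original image.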
def training_slide_to_image(slide_number):
"""
Convert a WSI training slide to a saved scaled-down image in a format such as jpg or png.
Args:
slide_number: The slide number.
"""
img, large_w, large_h, new_w, new_h = slide_to_scaled_pil_image(slide_number)
img_path = get_training_image_path(slide_number, large_w, large_h, new_w, new_h)
print("Saving image to: " + img_path)
if not os.path.exists(DEST_TRAIN_DIR):
os.makedirs(DEST_TRAIN_DIR)
img.save(img_path)
thumbnail_path = get_training_thumbnail_path(slide_number, large_w, large_h, new_w, new_h)
save_thumbnail(img, THUMBNAIL_SIZE, thumbnail_path)
def slide_to_scaled_pil_image(slide_number):
"""
Convert a WSI training slide to a scaled-down PIL image.
Args:
slide_number: The slide number.
Returns:
Tuple consisting of scaled-down PIL image, original width, original height, new width, and new height.
"""
slide_filepath = get_training_slide_path(slide_number)
print("Opening Slide #%d: %s" % (slide_number, slide_filepath))
slide = open_slide(slide_filepath)
large_w, large_h = slide.dimensions
new_w = math.floor(large_w / SCALE_FACTOR)
new_h = math.floor(large_h / SCALE_FACTOR)
level = slide.get_best_level_for_downsample(SCALE_FACTOR)
whole_slide_image = slide.read_region((0, 0), level, slide.level_dimensions[level])
whole_slide_image = whole_slide_image.convert("RGB")
img = whole_slide_image.resize((new_w, new_h), PIL.Image.BILINEAR)
return img, large_w, large_h, new_w, new_h
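# Quick sanity check of the scaling above (illustrative only): with SCALE_FACTOR = 32, a
# 49920 x 108288 slide gives new_w = floor(49920 / 32) = 1560 and new_h = floor(108288 / 32) = 3384,
# which matches the "-32x-49920x108288-1560x3384-" portion of the filenames shown in the
# docstring examples earlier in this module.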
def slide_to_scaled_np_image(slide_number):
"""
Convert a WSI training slide to a scaled-down NumPy image.
Args:
slide_number: The slide number.
Returns:
Tuple consisting of scaled-down NumPy image, original width, original height, new width, and new height.
"""
pil_img, large_w, large_h, new_w, new_h = slide_to_scaled_pil_image(slide_number)
np_img = util.pil_to_np_rgb(pil_img)
return np_img, large_w, large_h, new_w, new_h
def show_slide(slide_number):
"""
Display a WSI slide on the screen, where the slide has been scaled down and converted to a PIL image.
Args:
slide_number: The slide number.
"""
pil_img = slide_to_scaled_pil_image(slide_number)[0]
pil_img.show()
def save_thumbnail(pil_img, size, path, display_path=False):
"""
Save a thumbnail of a PIL image, specifying the maximum width or height of the thumbnail.
Args:
pil_img: The PIL image to save as a thumbnail.
size: The maximum width or height of the thumbnail.
path: The path to the thumbnail.
display_path: If True, display thumbnail path in console.
"""
max_size = tuple(round(size * d / max(pil_img.size)) for d in pil_img.size)
img = pil_img.resize(max_size, PIL.Image.BILINEAR)
if display_path:
print("Saving thumbnail to: " + path)
dir = os.path.dirname(path)
if dir != '' and not os.path.exists(dir):
os.makedirs(dir)
img.save(path)
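# Example of the max_size computation above (illustrative numbers, not from the original source):
# for a 1560 x 3384 PIL image and size = 1024, max(pil_img.size) is 3384, so the thumbnail
# dimensions become (round(1024 * 1560 / 3384), round(1024 * 3384 / 3384)) = (472, 1024);
# the larger side is capped at `size` and the aspect ratio is preserved.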
def get_num_training_slides():
"""
Obtain the total number of WSI training slide images.
Returns:
The total number of WSI training slide images.
"""
num_training_slides = len(glob.glob1(SRC_TRAIN_DIR, "*." + SRC_TRAIN_EXT))
return num_training_slides
def training_slide_range_to_images(start_ind, end_ind):
"""
Convert a range of WSI training slides to smaller images (in a format such as jpg or png).
Args:
start_ind: Starting index (inclusive).
end_ind: Ending index (inclusive).
Returns:
The starting index and the ending index of the slides that were converted.
"""
for slide_num in range(start_ind, end_ind + 1):
training_slide_to_image(slide_num)
return (start_ind, end_ind)
def singleprocess_training_slides_to_images():
"""
Convert all WSI training slides to smaller images using a single process.
"""
t = Time()
num_train_images = get_num_training_slides()
training_slide_range_to_images(1, num_train_images)
t.elapsed_display()
def multiprocess_training_slides_to_images():
"""
Convert all WSI training slides to smaller images using multiple processes (one process per core).
Each process will process a range of slide numbers.
"""
timer = Time()
# how many processes to use
num_processes = multiprocessing.cpu_count()
pool = multiprocessing.Pool(num_processes)
num_train_images = get_num_training_slides()
if num_processes > num_train_images:
num_processes = num_train_images
images_per_process = num_train_images / num_processes
print("Number of processes: " + str(num_processes))
print("Number of training images: " + str(num_train_images))
# each task specifies a range of slides
tasks = []
for num_process in range(1, num_processes + 1):
start_index = (num_process - 1) * images_per_process + 1
end_index = num_process * images_per_process
start_index = int(start_index)
end_index = int(end_index)
tasks.append((start_index, end_index))
if start_index == end_index:
print("Task #" + str(num_process) + ": Process slide " + str(start_index))
else:
print("Task #" + str(num_process) + ": Process slides " + str(start_index) + " to " + str(end_index))
# start tasks
results = []
for t in tasks:
results.append(pool.apply_async(training_slide_range_to_images, t))
for result in results:
(start_ind, end_ind) = result.get()
if start_ind == end_ind:
print("Done converting slide %d" % start_ind)
else:
print("Done converting slides %d through %d" % (start_ind, end_ind))
timer.elapsed_display()
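# Illustrative walk-through of the range splitting above (not in the original source): with
# 10 training images and 4 processes, images_per_process is 2.5 and the generated
# (start_index, end_index) tasks are (1, 2), (3, 5), (6, 7) and (8, 10) after the int()
# truncation, so every slide number from 1 to 10 is processed exactly once.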
def slide_stats():
"""
Display statistics/graphs about training slides.
"""
t = Time()
if not os.path.exists(STATS_DIR):
os.makedirs(STATS_DIR)
num_train_images = get_num_training_slides()
slide_stats = []
for slide_num in range(1, num_train_images + 1):
slide_filepath = get_training_slide_path(slide_num)
print("Opening Slide #%d: %s" % (slide_num, slide_filepath))
slide = open_slide(slide_filepath)
(width, height) = slide.dimensions
print(" Dimensions: {:,d} x {:,d}".format(width, height))
slide_stats.append((width, height))
max_width = 0
max_height = 0
min_width = sys.maxsize
min_height = sys.maxsize
total_width = 0
total_height = 0
total_size = 0
which_max_width = 0
which_max_height = 0
which_min_width = 0
which_min_height = 0
max_size = 0
min_size = sys.maxsize
which_max_size = 0
which_min_size = 0
for z in range(0, num_train_images):
(width, height) = slide_stats[z]
if width > max_width:
max_width = width
which_max_width = z + 1
if width < min_width:
min_width = width
which_min_width = z + 1
if height > max_height:
max_height = height
which_max_height = z + 1
if height < min_height:
min_height = height
which_min_height = z + 1
size = width * height
if size > max_size:
max_size = size
which_max_size = z + 1
if size < min_size:
min_size = size
which_min_size = z + 1
total_width = total_width + width
total_height = total_height + height
total_size = total_size + size
avg_width = total_width / num_train_images
avg_height = total_height / num_train_images
avg_size = total_size / num_train_images
stats_string = ""
stats_string += "%-11s {:14,d} pixels (slide #%d)".format(max_width) % ("Max width:", which_max_width)
stats_string += "\n%-11s {:14,d} pixels (slide #%d)".format(max_height) % ("Max height:", which_max_height)
stats_string += "\n%-11s {:14,d} pixels (slide #%d)".format(max_size) % ("Max size:", which_max_size)
stats_string += "\n%-11s {:14,d} pixels (slide #%d)".format(min_width) % ("Min width:", which_min_width)
stats_string += "\n%-11s {:14,d} pixels (slide #%d)".format(min_height) % ("Min height:", which_min_height)
stats_string += "\n%-11s {:14,d} pixels (slide #%d)".format(min_size) % ("Min size:", which_min_size)
stats_string += "\n%-11s {:14,d} pixels".format(round(avg_width)) % "Avg width:"
stats_string += "\n%-11s {:14,d} pixels".format(round(avg_height)) % "Avg height:"
stats_string += "\n%-11s {:14,d} pixels".format(round(avg_size)) % "Avg size:"
stats_string += "\n"
print(stats_string)
stats_string += "\nslide number,width,height"
for i in range(0, len(slide_stats)):
(width, height) = slide_stats[i]
stats_string += "\n%d,%d,%d" % (i + 1, width, height)
stats_string += "\n"
stats_file = open(os.path.join(STATS_DIR, "stats.txt"), "w")
stats_file.write(stats_string)
stats_file.close()
t.elapsed_display()
x, y = zip(*slide_stats)
colors = np.random.rand(num_train_images)
sizes = [10 for n in range(num_train_images)]
plt.scatter(x, y, s=sizes, c=colors, alpha=0.7)
plt.xlabel("width (pixels)")
plt.ylabel("height (pixels)")
plt.title("SVS Image Sizes")
plt.set_cmap("prism")
plt.tight_layout()
plt.savefig(os.path.join(STATS_DIR, "svs-image-sizes.png"))
plt.show()
plt.clf()
plt.scatter(x, y, s=sizes, c=colors, alpha=0.7)
plt.xlabel("width (pixels)")
plt.ylabel("height (pixels)")
plt.title("SVS Image Sizes (Labeled with slide numbers)")
plt.set_cmap("prism")
for i in range(num_train_images):
snum = i + 1
plt.annotate(str(snum), (x[i], y[i]))
plt.tight_layout()
plt.savefig(os.path.join(STATS_DIR, "svs-image-sizes-slide-numbers.png"))
plt.show()
plt.clf()
area = [w * h / 1000000 for (w, h) in slide_stats]
plt.hist(area, bins=64)
plt.xlabel("width x height (M of pixels)")
plt.ylabel("# images")
plt.title("Distribution of image sizes in millions of pixels")
plt.tight_layout()
plt.savefig(os.path.join(STATS_DIR, "distribution-of-svs-image-sizes.png"))
plt.show()
plt.clf()
whratio = [w / h for (w, h) in slide_stats]
plt.hist(whratio, bins=64)
plt.xlabel("width to height ratio")
plt.ylabel("# images")
plt.title("Image shapes (width to height)")
plt.tight_layout()
plt.savefig(os.path.join(STATS_DIR, "w-to-h.png"))
plt.show()
plt.clf()
hwratio = [h / w for (w, h) in slide_stats]
plt.hist(hwratio, bins=64)
plt.xlabel("height to width ratio")
plt.ylabel("# images")
plt.title("Image shapes (height to width)")
plt.tight_layout()
plt.savefig(os.path.join(STATS_DIR, "h-to-w.png"))
plt.show()
def slide_info(display_all_properties=False):
"""
Display information (such as properties) about training images.
Args:
display_all_properties: If True, display all available slide properties.
"""
t = Time()
num_train_images = get_num_training_slides()
obj_pow_20_list = []
obj_pow_40_list = []
obj_pow_other_list = []
for slide_num in range(1, num_train_images + 1):
slide_filepath = get_training_slide_path(slide_num)
print("\nOpening Slide #%d: %s" % (slide_num, slide_filepath))
slide = open_slide(slide_filepath)
print("Level count: %d" % slide.level_count)
print("Level dimensions: " + str(slide.level_dimensions))
print("Level downsamples: " + str(slide.level_downsamples))
print("Dimensions: " + str(slide.dimensions))
objective_power = int(slide.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
print("Objective power: " + str(objective_power))
if objective_power == 20:
obj_pow_20_list.append(slide_num)
elif objective_power == 40:
obj_pow_40_list.append(slide_num)
else:
obj_pow_other_list.append(slide_num)
print("Associated images:")
for ai_key in slide.associated_images.keys():
print(" " + str(ai_key) + ": " + str(slide.associated_images.get(ai_key)))
print("Format: " + str(slide.detect_format(slide_filepath)))
if display_all_properties:
print("Properties:")
for prop_key in slide.properties.keys():
print(" Property: " + str(prop_key) + ", value: " + str(slide.properties.get(prop_key)))
print("\n\nSlide Magnifications:")
print(" 20x Slides: " + str(obj_pow_20_list))
print(" 40x Slides: " + str(obj_pow_40_list))
print(" ??x Slides: " + str(obj_pow_other_list) + "\n")
t.elapsed_display()
# if __name__ == "__main__":
# show_slide(2)
# slide_info(display_all_properties=True)
# slide_stats()
# training_slide_to_image(4)
# img_path = get_training_image_path(4)
# img = open_image(img_path)
# img.show()
# slide_to_scaled_pil_image(5)[0].show()
# singleprocess_training_slides_to_images()
# multiprocess_training_slides_to_images()
| 32.08463
| 120
| 0.711912
|
dd13119d988245579ae658a48bad6de144079596
| 1,718
|
py
|
Python
|
plugin.video.fanfilm/resources/lib/resolvers/mailru.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
plugin.video.fanfilm/resources/lib/resolvers/mailru.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
plugin.video.fanfilm/resources/lib/resolvers/mailru.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
# -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,json,urllib
import requests
def resolve(url):
try:
usr = re.compile('/mail/(.+?)/').findall(url)[0]
vid = re.compile('(\d*)[.]html').findall(url)[0]
url = 'http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60' % (usr, vid)
r = requests.get(url)
result = r.content
cookie = r.headers['Set-Cookie']
u = json.loads(result)['videos']
print("u",u)
h = "|Cookie=%s" % urllib.quote(cookie)
url = []
try: url += [[{'quality': '1080p', 'url': i['url'] + h} for i in u if i['key'] == '1080p'][0]]
except: pass
try: url += [[{'quality': 'HD', 'url': i['url'] + h} for i in u if i['key'] == '720p'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i['url'] + h} for i in u if not (i['key'] == '1080p' or i['key'] == '720p')][0]]
except: pass
if url == []: return
return url
except:
return
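# Illustrative usage note (hypothetical values, not part of the original source): for a page URL
# containing '/mail/someuser/' and ending in '123.html', the regexes above extract usr = 'someuser'
# and vid = '123'. resolve() then returns a list of dicts such as
# [{'quality': 'HD', 'url': 'http://.../video.mp4|Cookie=...'}], or None if nothing could be parsed.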
| 33.038462
| 128
| 0.590221
|
8092c70f3bae9b569cae61f3e3c295c5bd4e5cd6
| 15,963
|
py
|
Python
|
AI_detection.py
|
wb666greene/SecurityDVR_AI_addon
|
1e3aa925ccd0294b0ea76d1733cd93eab9b37068
|
[
"MIT"
] | 9
|
2018-05-29T19:27:55.000Z
|
2021-09-28T08:14:01.000Z
|
AI_detection.py
|
wb666greene/SecurityDVR_AI_addon
|
1e3aa925ccd0294b0ea76d1733cd93eab9b37068
|
[
"MIT"
] | 1
|
2018-05-31T20:54:51.000Z
|
2019-03-26T16:55:54.000Z
|
AI_detection.py
|
wb666greene/SecurityDVR_AI_addon
|
1e3aa925ccd0294b0ea76d1733cd93eab9b37068
|
[
"MIT"
] | 3
|
2018-05-31T07:31:36.000Z
|
2019-12-26T13:55:02.000Z
|
#!/usr/bin/env python
# AI_detection.py derived from: ncs_realtime_objectdetection.py 7May18wbk
# Original code and tutorial:
# https://www.pyimagesearch.com/2018/02/19/real-time-object-detection-on-the-raspberry-pi-with-the-movidius-ncs/
# Modified to run AI object detection on image sequences instead of PiCam video stream using Movidius Neuro Compute Stick
#
# USAGE
# changed defaults 9May16wbk
# Command: python AI_detection.py
# will default to using graphs/mobilenetgraph
#
# python AI_detection.py --graph graphs/mobilenetgraph
# will use an alternate NCS graph, but there may be issues with using a different graph file!
#
# python AI_detection.py --graph graphs/mobilenetgraph --confidence 0.76 --display 0
# will let you change all the defaults (shown)
#
# Get path to image files via MQTT 8MAY18wbk
# currently input images are assumed to be size 704x480 from Lorex DVR "snapshots". This restriction was removed 19May19wbk
#
# 13May18wbk
# I developed this with the ftpd, Samba server, and node-red running on AlarmPi headless Pi2 running Raspbian "Jessie".
# The node-red flow is saved in the file: AlarmPi_flow.formatted.json and connects to the MQTT server on Alarmbone Beaglebone
# computer that interfaces the PIR and handles alarm system states "Email" "Audio" "Idle".
# The node-red flow filters the filenames ftpd receives based on PIR motion detectors that cover the camera field of view.
# The Lorex video motion detection is basically useless, just "if n pixels changed in x amount of time it's motion", pitiful.
# Unfortunately PIR motion detectors false trigger as the sun moves across the sky or dark fast-moving clouds pass by, so
# PIR motion detectors don't solve the false motion problems unless the field of view doesn't include significant natural
# light or cycling heating/cooling sources.
#
# This program was developed on a Pi3 running Raspbian "Stretch" networked with AlarmPi and Alarmbone. I plan to move it to
# run on AlarmPi upgraded to a Pi3 B+ (for faster ethernet connection to the Lorex) and use an MQTT server running on it
# to keep the Lorex topic MQTT messages local. 17May18wbk the port went well but the Lorex snapshot rate is less
# than I'd like.
#
#19May18wbk
# Modified to use a simplified node-red flow to be the ftp server and write the files to a local directory sending
# the filenames to this AI_detection script via MQTT for a simple way to demo the ideas behind the system.
# Use a PiZeroW with PiCamera running MotioneyeOS to be the DVR. Simple and relatively inexpensive
# system to duplicate -- Movidius NCS ~$80, Pi3 ~$30-35, PiZeroW ~$10, PiCamera module ~$30, + power supplies and suitable cases.
# the mundane keyboard, monitor, cables, etc. are not really needed for development, as it all can be done via ssh, but IMHO
# it really helps with the initial setup of Raspbian and MotioneyeOS to have a KVM.
# MotioneyeOS has a decent enough video motion detector that adding a PIR didn't seem to be worth the trouble.
#
# Other than removing all the crap to support the FLIR Lorex DVR lameness, the major change was to automatically handle various input image frame sizes.
# Sorry, but I hate Python's significant whitespace feature and use a 43" 4K monitor with 160 column editors/xterms for my work.
# import the necessary packages
from mvnc import mvncapi as mvnc
import argparse
import numpy as np
import time
import cv2
import paho.mqtt.client as mqtt
import os
# MQTT server used by the node-red flow for sending the filenames to be analyzed
MQTTserver="localhost"
# these are basically where the ftp client wants to put the files on the server
# and are used in the on_message() mqtt callback
# These reflect the settings used in the MotioneyeOS setup
ftpdPath="/home/pi/Meye/2018-05-20"
ftpdFQN="/home/pi/Meye/2018-05-20/10-35-12.jpg"
ftpdTopic="/home/pi/Meye"
subscribeTopic="/home/pi/Meye/#"
# initialize the list of class labels our network was trained to
# detect, then generate a set of bounding box colors for each class
# Problem: if a different neural net graph is passed in as a parameter these are
# unlikely to be correct for it, so the CLASSES labels need to be parameterized as well!
CLASSES = ("background", "aeroplane", "bicycle", "bird",
"boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person",
"pottedplant", "sheep", "sofa", "train", "tvmonitor")
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
COLORS[15] = (250,250,250) # force person box to be white, security cam images tend to be dark especially in IR illumination
# Following the example in: https://www.pyimagesearch.com/2018/05/14/a-gentle-guide-to-deep-learning-object-detection
# objects to ignore if detected
IGNORE = set(["aeroplane", "boat", "bottle", "bus", "chair", "diningtable","pottedplant", "sofa", "train", "tvmonitor"])
# frame dimensions should match what the neural net was trained with.
PREPROCESS_DIMS = (300, 300)
#input image size, set by Lorex DVR
DISPLAY_DIMS = (704, 480) #default, is now reset dynamically on every input image received via MQTT
## functions
def preprocess_image(input_image):
# preprocess the image
preprocessed = cv2.resize(input_image, PREPROCESS_DIMS)
preprocessed = preprocessed - 127.5
preprocessed = preprocessed * 0.007843
preprocessed = preprocessed.astype(np.float16)
# return the image to the calling function
return preprocessed
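# Note on the preprocessing above (illustrative): subtracting 127.5 and multiplying by 0.007843
# (roughly 1/127.5) maps 8-bit pixel values from [0, 255] into approximately [-1, 1],
# e.g. 0 -> -1.0, 127.5 -> 0.0 and 255 -> +1.0, which is the input range the MobileNet-SSD
# model was trained with.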
def predict(image, graph):
# preprocess the image
image = preprocess_image(image)
# send the image to the NCS and run a forward pass to grab the
# network predictions
graph.LoadTensor(image, None)
(output, _) = graph.GetResult()
# grab the number of valid object predictions from the output,
# then initialize the list of predictions
num_valid_boxes = output[0]
predictions = []
# loop over results
for box_index in range(num_valid_boxes): # last index is tvmonitor which is not relevant
# calculate the base index into our array so we can extract
# bounding box information
base_index = 7 + box_index * 7
# boxes with non-finite (inf, nan, etc) numbers must be ignored
if (not np.isfinite(output[base_index]) or
not np.isfinite(output[base_index + 1]) or
not np.isfinite(output[base_index + 2]) or
not np.isfinite(output[base_index + 3]) or
not np.isfinite(output[base_index + 4]) or
not np.isfinite(output[base_index + 5]) or
not np.isfinite(output[base_index + 6])):
continue
# extract the image width and height and clip the boxes to the
# image size in case network returns boxes outside of the image boundaries
(h, w) = image.shape[:2]
x1 = max(0, int(output[base_index + 3] * w))
y1 = max(0, int(output[base_index + 4] * h))
x2 = min(w, int(output[base_index + 5] * w))
y2 = min(h, int(output[base_index + 6] * h))
# grab the prediction class label, confidence (i.e., probability),
# and bounding box (x, y)-coordinates
pred_class = int(output[base_index + 1])
pred_conf = output[base_index + 2]
pred_boxpts = ((x1, y1), (x2, y2))
# create prediction tuple and append the prediction to the predictions list
prediction = (pred_class, pred_conf, pred_boxpts)
# my initial kludge to ignore one class sometimes in my camera's field of view
#if pred_class != 20: # filter out prediction of tvmonitor which is not relevant for alarm system monitoring
# Better way from a pyimagesearch tutorial
if not CLASSES[pred_class] in IGNORE:
predictions.append(prediction)
# return the list of predictions to the calling function
return predictions
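# Layout note for the NCS output parsed above (derived from the indexing in predict(), added for
# clarity): output[0] holds the number of valid detections and each detection occupies 7 floats
# starting at index 7, ordered as (image_id, class_id, confidence, x1, y1, x2, y2), which is why
# base_index is 7 + box_index * 7 and the class, confidence and box corners are read from
# offsets +1 through +6.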
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(subscribeTopic)
# The callback for when a PUBLISH message is received from the server, aka message from SUBSCRIBE topic.
# typical filepath from Motioneye: "/home/pi/Meye/2018-05-20/10-35-12.jpg"
# this bit of code will likely be DVR dependent and probably should be parameterized
inName="" # file name via MQTT node-red on alarmPi
LorexMode="" # will be Email, Audio, or Idle via MQTT from alarmboneServer
def on_message(client, userdata, msg):
global inName
global LorexMode # not used here, would be Email, Audio, or Idle
if msg.topic == "/home/pi/Meye/Mode":
print(msg.topic+" "+str(msg.payload))
LorexMode = str(msg.payload)
elif str(msg.topic).startswith(ftpdTopic) == True:
# msg.topic is the ftpdFQN, perhaps I'm abusing MQTT but it simplifies things
inName=str(msg.topic)
#print inName
folder=inName[:len(ftpdPath)]
#print folder
if os.path.exists(folder) == False:
#print folder
os.mkdir(folder)
#print inName
# write the file from motioneye
if args["save"] > 0:
outfile=open(inName,'wb')
else:
outfile=open("discardMe.jpg",'wb')
outfile.write(msg.payload)
outfile.close()
else:
inName=""
def on_publish(client, userdata, mid):
#print("mid: " + str(mid)) # don't think I need to care about this for now, print for initial tests
pass
def on_disconnect(client, userdata, rc):
if rc != 0:
print("Unexpected disconnection!")
pass
## Get things started!
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-g", "--graph", default="./graphs/mobilenetgraph", help="optional path to input Movidius graph file")
ap.add_argument("-c", "--confidence", default=0.76, help="optional detection threshold, default 0.76")
# running headless (AlarmPi), want display default=0
ap.add_argument("-d", "--display", type=int, default=0, help="1 to display detected image on screen 2 displays NCS input and detected images")
# I default to saving the input images from the DVR for troubleshooting and perhaps future retraining
ap.add_argument("-s", "--save", type=int, default=1, help="1 to save original image in ftp Path, 0 to discard the original input images")
args = vars(ap.parse_args())
# grab a list of all NCS devices plugged in to USB
print("[INFO] finding NCS devices...")
devices = mvnc.EnumerateDevices()
# if no devices found, exit the script
if len(devices) == 0:
print("[INFO] No devices found. Please plug in a NCS")
quit()
# use the first device since this is a simple test script
# (you'll want to modify this is using multiple NCS devices)
print("[INFO] found {} devices. device0 will be used. "
"opening device0...".format(len(devices)))
device = mvnc.Device(devices[0])
device.OpenDevice()
# open the CNN graph file
print("[INFO] loading the graph file into RPi memory...")
with open(args["graph"], mode="rb") as f:
graph_in_memory = f.read()
# load the graph into the NCS
print("[INFO] allocating the graph on the NCS...")
graph = device.AllocateGraph(graph_in_memory)
# connect to MQTT broker
print("[INFO] connecting to MQTT broker...")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
client.on_disconnect = on_disconnect
client.will_set("AI/Status", "AI sub-system has died!", 2, True) # let everyone know we have died, perhaps node-red can restart it
client.connect(MQTTserver, 1883, 60)
client.publish("AI/Status", "AI running.", 2, True)
if args["display"] > 0:
cv2.namedWindow("Detector Output")
if args["display"] == 2:
cv2.namedWindow("AI Input")
cv2.moveWindow("AI Input", 10, 40)
cv2.moveWindow("Detector Output", 25 + PREPROCESS_DIMS[0], 40)
else:
cv2.moveWindow("Detector Output", 10, 40)
while True:
try:
if len(inName) >= len(ftpdPath):
#print "Processing --> "+inName
# the inName file was written by MQTT callback before getting here
if args["save"] > 0:
frame = cv2.imread(inName)
else:
frame = cv2.imread("discardMe.jpg")
(h,w)=frame.shape[:2]
DISPLAY_DIMS=(w,h)
image_for_result = frame.copy()
frame = cv2.resize(frame, PREPROCESS_DIMS)
if args["display"] == 2:
cv2.imshow("AI Input", frame)
key = cv2.waitKey(1) & 0xFF
# use the NCS to acquire predictions, deceptively simple
# all the hard AI work was done training the model used
# and "compiling" it for the NCS on a desktop computer
person_found = False
predictions = predict(frame, graph)
# loop over our predictions
for (i, pred) in enumerate(predictions):
# extract prediction data for readability
(pred_class, pred_conf, pred_boxpts) = pred
# filter out weak detections by ensuring the `confidence`
# is greater than the minimum confidence
if pred_conf > args["confidence"]:
# print prediction to terminal
#print("[INFO] Prediction #{}: class={}, confidence={}, ""boxpoints={}".format(i, CLASSES[pred_class], pred_conf, pred_boxpts))
# build a label consisting of the predicted class and associated probability
label = "{}: {:.0f}%".format(CLASSES[pred_class],pred_conf * 100)
# extract information from the prediction boxpoints
X_MULTIPLIER = float(DISPLAY_DIMS[0]) / PREPROCESS_DIMS[0]
Y_MULTIPLIER = float(DISPLAY_DIMS[1]) / PREPROCESS_DIMS[1]
(ptA, ptB) = (pred_boxpts[0], pred_boxpts[1])
startX = int(ptA[0] * X_MULTIPLIER)
startY = int(ptA[1] * Y_MULTIPLIER)
endX = int(ptB[0] * X_MULTIPLIER)
endY = int(ptB[1] * Y_MULTIPLIER)
y = startY - 5 if startY - 5 > 5 else startY + 5
# display the rectangle and label text
cv2.rectangle(image_for_result, (startX,startY), (endX,endY), COLORS[pred_class], 1)
cv2.putText(image_for_result, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[pred_class], 1)
if pred_class == 15:
person_found = True
# if a person is found save the output image
if person_found == True:
outName=inName
outName=outName.replace(".jpg",".out.jpg")
cv2.imwrite(outName, image_for_result)
#print " Person --> "+outName # todo: make loggile of results or output via MQTT message Topic: AI/detect perhaps
# pass the result on to anyone that cares via MQTT
client.publish("AI/Detect/Person", outName) # the topic higherarchy perhaps needs more though but keep it simple to start
#else:
# print " No Detection"
# check if we should display the frame on the screen
# with prediction data (you can achieve faster FPS if you
# do not output to the screen)
if args["display"] > 0:
# display the detected frame to the screen
if person_found == True:
cv2.imshow("Detector Output", image_for_result)
# end of if len(inName) >= len(ftpdPath): statement, I sure do hate Python's significant whitespace feature!
if args["display"] > 0:
key = cv2.waitKey(1) & 0xFF # required to pump CV2 event loop and actually display the image
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
inName=""
#pump MQTT to get a file path of next image to be processed
client.loop()
# if "ctrl+c" is pressed in the terminal, break from the loop
except KeyboardInterrupt:
break
# if there's a problem reading a frame, break gracefully
#except AttributeError:
# print " *** Exit on Attribute Error! ***"
# break
except cv2.error as e:
print(inName + " --> Error!")
print("**** openCV error: " + str(e))
continue # try to soldier on, I've so far never hit this
# destroy all windows if we are displaying them
if args["display"] > 0:
cv2.destroyAllWindows()
#delete last input file if original images are not being saved
if args["save"] == 0 and os.path.exists("discardMe.jpg"):
os.remove("discardMe.jpg")
# clean up the graph and device
graph.DeallocateGraph()
device.CloseDevice()
# clean up MQTT
client.publish("AI/Status", "AI stopped.", 2, True)
client.loop()
client.loop()
#client.wait_for_publish() # make sure last messages are sent; does this call not exist anymore??
client.disconnect() # normal exit, Will message should not be sent.
| 42.796247
| 152
| 0.727996
|
add78505b6e86d6ca4cedf94807cc0edee6309f3
| 27,742
|
py
|
Python
|
allennlp/commands/train.py
|
parus-proj/allennlp
|
42f6e030f7287d6521bbc3c883a8cbfbf15f806a
|
[
"Apache-2.0"
] | null | null | null |
allennlp/commands/train.py
|
parus-proj/allennlp
|
42f6e030f7287d6521bbc3c883a8cbfbf15f806a
|
[
"Apache-2.0"
] | null | null | null |
allennlp/commands/train.py
|
parus-proj/allennlp
|
42f6e030f7287d6521bbc3c883a8cbfbf15f806a
|
[
"Apache-2.0"
] | 1
|
2021-02-04T08:42:23.000Z
|
2021-02-04T08:42:23.000Z
|
"""
The ``train`` subcommand can be used to train a model.
It requires a configuration file and a directory in
which to write the results.
.. code-block:: bash
$ allennlp train --help
usage: allennlp train [-h] -s SERIALIZATION_DIR [-r] [-f] [-o OVERRIDES]
[--file-friendly-logging] [--node-rank NODE_RANK]
[--include-package INCLUDE_PACKAGE]
param_path
Train the specified model on the specified dataset.
positional arguments:
param_path path to parameter file describing the model to be
trained
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization-dir SERIALIZATION_DIR
directory in which to save the model and its logs
-r, --recover recover training from the state in serialization_dir
-f, --force overwrite the output directory if it exists
-o OVERRIDES, --overrides OVERRIDES
a JSON structure used to override the experiment
configuration
--file-friendly-logging
outputs tqdm status on separate lines and slows tqdm
refresh rate
--node-rank NODE_RANK
Rank of this node in the distributed setup (default =
0)
--include-package INCLUDE_PACKAGE
additional packages to include
"""
import argparse
import logging
import os
from typing import Any, Dict, Iterable, List, Optional
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from overrides import overrides
from allennlp.commands.subcommand import Subcommand
from allennlp.common import Params, Registrable, Lazy
from allennlp.common.checks import check_for_gpu, ConfigurationError
from allennlp.common import util as common_util
from allennlp.common.plugins import import_plugins
from allennlp.data import DataIterator, DatasetReader, Instance, Vocabulary
from allennlp.models.archival import archive_model, CONFIG_NAME
from allennlp.models.model import _DEFAULT_WEIGHTS, Model
from allennlp.training.trainer_base import TrainerBase
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
@Subcommand.register("train")
class Train(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Train the specified model on the specified dataset."""
subparser = parser.add_parser(self.name, description=description, help="Train a model.")
subparser.add_argument(
"param_path", type=str, help="path to parameter file describing the model to be trained"
)
subparser.add_argument(
"-s",
"--serialization-dir",
required=True,
type=str,
help="directory in which to save the model and its logs",
)
subparser.add_argument(
"-r",
"--recover",
action="store_true",
default=False,
help="recover training from the state in serialization_dir",
)
subparser.add_argument(
"-f",
"--force",
action="store_true",
required=False,
help="overwrite the output directory if it exists",
)
subparser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
subparser.add_argument(
"--file-friendly-logging",
action="store_true",
default=False,
help="outputs tqdm status on separate lines and slows tqdm refresh rate",
)
subparser.add_argument(
"--node-rank", type=int, default=0, help="Rank of this node in the distributed setup"
)
subparser.set_defaults(func=train_model_from_args)
return subparser
def train_model_from_args(args: argparse.Namespace):
"""
Just converts from an ``argparse.Namespace`` object to string paths.
"""
train_model_from_file(
parameter_filename=args.param_path,
serialization_dir=args.serialization_dir,
overrides=args.overrides,
file_friendly_logging=args.file_friendly_logging,
recover=args.recover,
force=args.force,
node_rank=args.node_rank,
include_package=args.include_package,
)
def train_model_from_file(
parameter_filename: str,
serialization_dir: str,
overrides: str = "",
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
) -> Model:
"""
A wrapper around :func:`train_model` which loads the params from a file.
# Parameters
parameter_filename : ``str``
A json parameter file specifying an AllenNLP experiment.
serialization_dir : ``str``
The directory in which to save results and logs. We just pass this along to
:func:`train_model`.
overrides : ``str``
A JSON string that we will use to override values in the input parameter file.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we make our output more friendly to saved model files. We just pass this
along to :func:`train_model`.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see ``Model.from_archive``.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
node_rank : ``int``, optional
Rank of the current node in distributed training
include_package : ``str``, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
"""
# Load the experiment config from a file and pass it to ``train_model``.
params = Params.from_file(parameter_filename, overrides)
return train_model(
params=params,
serialization_dir=serialization_dir,
file_friendly_logging=file_friendly_logging,
recover=recover,
force=force,
node_rank=node_rank,
include_package=include_package,
)
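# Illustrative usage (hypothetical paths, not part of the original source):
#
#   from allennlp.commands.train import train_model_from_file
#   model = train_model_from_file("experiments/my_model.jsonnet", "results/my_run", force=True)
#
# which is roughly what the command line
# `allennlp train experiments/my_model.jsonnet -s results/my_run -f` does.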
def train_model(
params: Params,
serialization_dir: str,
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False,
node_rank: int = 0,
include_package: List[str] = None,
batch_weight_key: str = "",
) -> Model:
"""
Trains the model specified in the given :class:`Params` object, using the data and training
parameters also specified in that object, and saves the results in ``serialization_dir``.
# Parameters
params : ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir : ``str``
The directory in which to save results and logs.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see ``Model.from_archive``.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
node_rank : ``int``, optional
Rank of the current node in distributed training
include_package : ``List[str]``, optional
In distributed mode, extra packages mentioned will be imported in trainer workers.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
# Returns
best_model : ``Model``
The model with the best epoch weights.
"""
training_util.create_serialization_dir(params, serialization_dir, recover, force)
params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
distributed_params = params.params.pop("distributed", None)
# If distributed isn't in the config and the config contains strictly
# one cuda device, we just run a single training process.
if distributed_params is None:
model = _train_worker(
process_rank=0,
params=params,
serialization_dir=serialization_dir,
file_friendly_logging=file_friendly_logging,
include_package=include_package,
batch_weight_key=batch_weight_key,
)
archive_model(serialization_dir)
return model
# Otherwise, we are running multiple processes for training.
else:
# We are careful here so that we can raise a good error if someone
# passed the wrong thing - cuda_devices are required.
device_ids = distributed_params.pop("cuda_devices", None)
multi_device = isinstance(device_ids, list) and len(device_ids) > 1
num_nodes = distributed_params.pop("num_nodes", 1)
if not (multi_device or num_nodes > 1):
raise ConfigurationError(
"Multiple cuda devices/nodes need to be configured to run distributed training."
)
check_for_gpu(device_ids)
master_addr = distributed_params.pop("master_address", "127.0.0.1")
master_port = distributed_params.pop("master_port", 29500)
num_procs = len(device_ids)
world_size = num_nodes * num_procs
logging.info(
f"Switching to distributed training mode since multiple GPUs are configured"
f"Master is at: {master_addr}:{master_port} | Rank of this node: {node_rank} | "
f"Number of workers in this node: {num_procs} | Number of nodes: {num_nodes} | "
f"World size: {world_size}"
)
# Creating `Vocabulary` objects from workers could be problematic since
# the data iterators in each worker will yield only `rank` specific
# instances. Hence it is safe to construct the vocabulary and write it
# to disk before initializing the distributed context. The workers will
# load the vocabulary from the path specified.
if params.get("vocabulary", Params({})).get("type", "") != "from_files":
vocab = training_util.make_vocab_from_params(params.duplicate(), serialization_dir)
params["vocabulary"] = {
"type": "from_files",
"directory": os.path.join(serialization_dir, "vocabulary"),
"padding_token": vocab._padding_token,
"oov_token": vocab._oov_token,
}
mp.spawn(
_train_worker,
args=(
params.duplicate(),
serialization_dir,
file_friendly_logging,
include_package,
batch_weight_key,
node_rank,
master_addr,
master_port,
world_size,
device_ids,
),
nprocs=num_procs,
)
archive_model(serialization_dir)
model = Model.load(params, serialization_dir)
return model
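# For reference, an illustrative sketch (not part of the original source) of the "distributed"
# block consumed above as it might appear in an experiment config:
#
#   "distributed": {
#       "cuda_devices": [0, 1, 2, 3],   # required: more than one device (or num_nodes > 1)
#       "num_nodes": 1,                 # optional, defaults to 1
#       "master_address": "127.0.0.1",  # optional, default shown
#       "master_port": 29500            # optional, default shown
#   }
#
# With this block present, train_model spawns one _train_worker process per listed device on this node.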
def _train_worker(
process_rank: int,
params: Params,
serialization_dir: str,
file_friendly_logging: bool = False,
include_package: List[str] = None,
batch_weight_key: str = "",
node_rank: int = 0,
master_addr: str = "127.0.0.1",
master_port: int = 29500,
world_size: int = 1,
distributed_device_ids: List[str] = None,
) -> Optional[Model]:
"""
Helper to train the configured model/experiment. In distributed mode, this is spawned as a
worker process. In a single GPU experiment, this returns the ``Model`` object and in distributed
training, nothing is returned.
# Parameters
process_rank : ``int``
The process index that is initialized using the GPU device id.
params : ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir : ``str``
The directory in which to save results and logs.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
include_package : ``List[str]``, optional
In distributed mode, since this function would have been spawned as a separate process,
the extra imports need to be done again. NOTE: This does not have any effect in single
GPU training.
batch_weight_key : ``str``, optional (default="")
If non-empty, name of metric used to weight the loss on a per-batch basis.
node_rank : ``int``, optional
Rank of the node.
master_addr : ``str``, optional (default="127.0.0.1")
Address of the master node for distributed training.
master_port : ``int``, optional (default=29500)
Port of the master node for distributed training.
world_size : ``int``, optional
The number of processes involved in distributed training.
distributed_device_ids: ``List[str]``, optional
IDs of the devices involved in distributed training.
# Returns
best_model : ``Model``
The model with the best epoch weights.
"""
common_util.prepare_global_logging(
serialization_dir, file_friendly_logging, rank=process_rank, world_size=world_size
)
common_util.prepare_environment(params)
distributed = world_size > 1
# not using `allennlp.common.util.is_master` as the process group is yet to be initialized
master = process_rank == 0
include_package = include_package or []
if distributed:
# Since the worker is spawned and not forked, the extra imports need to be done again.
import_plugins()
for package_name in include_package:
common_util.import_submodules(package_name)
num_procs_per_node = len(distributed_device_ids)
# The Unique identifier of the worker process among all the processes in the
# distributed training group is computed here. This is used while initializing
# the process group using `init_process_group`
global_rank = node_rank * num_procs_per_node + process_rank
# Number of processes per node is useful to know if a process
# is a master in the local node (node in which it is running)
os.environ["ALLENNLP_PROCS_PER_NODE"] = str(num_procs_per_node)
# In distributed training, the configured device is always going to be a list.
# The corresponding gpu id for the particular worker is obtained by picking the id
# from the device list with the rank as index
gpu_id = distributed_device_ids[process_rank] # type: ignore
# Till now, "cuda_device" might not be set in the trainer params.
# But a worker trainer needs to only know about its specific GPU id.
params["trainer"]["cuda_device"] = gpu_id
params["trainer"]["world_size"] = world_size
params["trainer"]["distributed"] = True
torch.cuda.set_device(int(gpu_id))
dist.init_process_group(
backend="nccl",
init_method=f"tcp://{master_addr}:{master_port}",
world_size=world_size,
rank=global_rank,
)
logging.info(
f"Process group of world size {world_size} initialized "
f"for distributed training in worker {global_rank}"
)
train_loop = TrainModel.from_params(
params=params,
serialization_dir=serialization_dir,
local_rank=process_rank,
batch_weight_key=batch_weight_key,
)
try:
if distributed: # let the setup get ready for all the workers
dist.barrier()
metrics = train_loop.run()
except KeyboardInterrupt:
# if we have completed an epoch, try to create a model archive.
if master and os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info(
"Training interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights."
)
archive_model(serialization_dir)
raise
if master:
train_loop.finish(metrics)
if not distributed:
return train_loop.model
return None # to make mypy happy
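# Worked example of the rank arithmetic above (illustrative): with 2 nodes and 4 GPUs per node,
# world_size is 2 * 4 = 8, and the worker with process_rank 1 on node_rank 1 computes
# global_rank = 1 * 4 + 1 = 5, which is the rank passed to dist.init_process_group.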
class TrainModel(Registrable):
"""
This class exists so that we can easily read a configuration file with the `allennlp train`
command. The basic logic is that we call `train_loop =
TrainModel.from_params(params_from_config_file)`, then `train_loop.run()`. This class performs
very little logic, pushing most of it to the `TrainerBase` that has a `train()` method. The
point here is to construct all of the dependencies for the `TrainerBase` in a way that we can do
it using `from_params()`, while having all of those dependencies transparently documented and
not hidden in calls to `params.pop()`. If you are writing your own training loop, you almost
certainly should not use this class, but you might look at the code for this class to see what
we do, to make writing your training loop easier.
In particular, if you are tempted to call the `__init__` method of this class, you are probably
doing something unnecessary. Literally all we do after `__init__` is call `trainer.train()`. You
can do that yourself, if you've constructed a `Trainer` already. What this class gives you is a
way to construct the `Trainer` by means of a config file. The actual constructor that we use
with `from_params` in this class is `from_partial_objects`. See that method for a description
of all of the allowed top-level keys in a configuration file used with `allennlp train`.
"""
default_implementation = "default"
def __init__(
self,
serialization_dir: str,
model: Model,
trainer: TrainerBase,
evaluation_dataset: Iterable[Instance] = None,
evaluation_iterator: DataIterator = None,
evaluate_on_test: bool = False,
batch_weight_key: str = "",
) -> None:
self.serialization_dir = serialization_dir
self.model = model
self.trainer = trainer
self.evaluation_dataset = evaluation_dataset
self.evaluation_iterator = evaluation_iterator
self.evaluate_on_test = evaluate_on_test
self.batch_weight_key = batch_weight_key
def run(self) -> Dict[str, Any]:
return self.trainer.train()
def finish(self, metrics: Dict[str, Any]):
if self.evaluation_dataset and self.evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
test_metrics = training_util.evaluate(
self.model,
self.evaluation_dataset,
self.evaluation_iterator,
cuda_device=self.trainer.cuda_device,
batch_weight_key=self.batch_weight_key,
)
for key, value in test_metrics.items():
metrics["test_" + key] = value
elif self.evaluation_dataset:
logger.info(
"To evaluate on the test set after training, pass the "
"'evaluate_on_test' flag, or use the 'allennlp evaluate' command."
)
common_util.dump_metrics(
os.path.join(self.serialization_dir, "metrics.json"), metrics, log=True
)
@classmethod
def from_partial_objects(
cls,
serialization_dir: str,
local_rank: int,
batch_weight_key: str,
dataset_reader: DatasetReader,
train_data_path: str,
model: Lazy[Model],
iterator: DataIterator,
trainer: Lazy[TrainerBase],
vocabulary: Lazy[Vocabulary] = None,
datasets_for_vocab_creation: List[str] = None,
validation_dataset_reader: DatasetReader = None,
validation_data_path: str = None,
validation_iterator: DataIterator = None,
test_data_path: str = None,
evaluate_on_test: bool = False,
) -> "TrainModel":
"""
This method is intended for use with our `FromParams` logic, to construct a `TrainModel`
object from a config file passed to the `allennlp train` command. The arguments to this
method are the allowed top-level keys in a configuration file (except for the first three,
which are obtained separately).
You *could* use this outside of our `FromParams` logic if you really want to, but there
might be easier ways to accomplish your goal than instantiating `Lazy` objects. If you are
writing your own training loop, we recommend that you look at the implementation of this
method for inspiration and possibly some utility functions you can call, but you very likely
should not use this method directly.
The `Lazy` type annotations here are a mechanism for building dependencies to an object
sequentially - the `TrainModel` object needs data, a model, and a trainer, but the model
needs to see the data before it's constructed (to create a vocabulary) and the trainer needs
the data and the model before it's constructed. Objects that have sequential dependencies
like this are labeled as `Lazy` in their type annotations, and we pass the missing
dependencies when we call their `construct()` method, which you can see in the code below.
# Parameters
serialization_dir: `str`
The directory where logs and model archives will be saved.
local_rank: `int`
The process index that is initialized using the GPU device id.
batch_weight_key: `str`
The name of metric used to weight the loss on a per-batch basis.
dataset_reader: `DatasetReader`
The `DatasetReader` that will be used for training and (by default) for validation.
train_data_path: `str`
The file (or directory) that will be passed to `dataset_reader.read()` to construct the
training data.
model: `Lazy[Model]`
The model that we will train. This is lazy because it depends on the `Vocabulary`;
after constructing the vocabulary we call `model.construct(vocab=vocabulary)`.
iterator: `DataIterator`
The iterator we use to batch instances from the dataset reader at training and (by
default) validation time.
trainer: `Lazy[TrainerBase]`
The `Trainer` that actually implements the training loop. This is a lazy object because
it depends on the model that's going to be trained.
vocabulary: `Lazy[Vocabulary]`, optional (default=None)
The `Vocabulary` that we will use to convert strings in the data to integer ids (and
possibly set sizes of embedding matrices in the `Model`). By default we construct the
vocabulary from the instances that we read.
datasets_for_vocab_creation: `List[str]`, optional (default=None)
If you pass in more than one dataset but don't want to use all of them to construct a
vocabulary, you can pass in this key to limit it. Valid entries in the list are
"train", "validation" and "test".
validation_dataset_reader: `DatasetReader`, optional (default=None)
If given, we will use this dataset reader for the validation data instead of
`dataset_reader`.
validation_data_path: `str`, optional (default=None)
If given, we will use this data for computing validation metrics and early stopping.
validation_iterator: `DataIterator`, optional (default=None)
If given, we will use this iterator for batching and scheduling instances for the
validation data, instead of `iterator`.
test_data_path: `str`, optional (default=None)
If given, we will use this as test data. This makes it available for vocab creation by
default, but nothing else.
evaluate_on_test: `bool`, optional (default=False)
If given, we will evaluate the final model on this data at the end of training. Note
that we do not recommend using this for actual test data in every-day experimentation;
you should only very rarely evaluate your model on actual test data.
"""
datasets = training_util.read_all_datasets(
train_data_path=train_data_path,
dataset_reader=dataset_reader,
validation_dataset_reader=validation_dataset_reader,
validation_data_path=validation_data_path,
test_data_path=test_data_path,
)
if datasets_for_vocab_creation:
for key in datasets_for_vocab_creation:
if key not in datasets:
raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {key}")
instance_generator = (
instance
for key, dataset in datasets.items()
if not datasets_for_vocab_creation or key in datasets_for_vocab_creation
for instance in dataset
)
vocabulary_ = vocabulary.construct(instances=instance_generator)
if not vocabulary_:
vocabulary_ = Vocabulary.from_instances(instance_generator)
model_ = model.construct(vocab=vocabulary_)
# Initializing the model can have side effect of expanding the vocabulary.
# Save the vocab only in the master. In the degenerate non-distributed
# case, we're trivially the master.
if common_util.is_master():
vocabulary_path = os.path.join(serialization_dir, "vocabulary")
vocabulary_.save_to_files(vocabulary_path)
iterator.index_with(model_.vocab)
validation_iterator = validation_iterator or iterator
validation_iterator.index_with(model_.vocab) # it is ok to call this twice
# We don't need to pass serialization_dir and local_rank here, because they will have been
# passed through the trainer by from_params already, because they were keyword arguments to
# construct this class in the first place.
trainer_ = trainer.construct(
model=model_,
iterator=iterator,
train_data=datasets["train"],
validation_iterator=validation_iterator,
validation_data=datasets.get("validation"),
)
return cls(
serialization_dir=serialization_dir,
model=model_,
trainer=trainer_,
evaluation_dataset=datasets.get("test"),
evaluation_iterator=validation_iterator,
evaluate_on_test=evaluate_on_test,
batch_weight_key=batch_weight_key,
)
TrainModel.register("default", constructor="from_partial_objects")(TrainModel)
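# Illustrative sketch (not part of the original module): the top-level keys of a
# config file passed to `allennlp train` mirror the parameters of
# `from_partial_objects` above. The "type" names and file paths below are
# hypothetical placeholders rather than real registered components.
EXAMPLE_TRAIN_CONFIG = {
    "dataset_reader": {"type": "my_reader"},
    "train_data_path": "data/train.jsonl",
    "validation_data_path": "data/dev.jsonl",
    "model": {"type": "my_model"},  # constructed lazily, after the vocabulary
    "iterator": {"type": "basic", "batch_size": 32},
    "trainer": {"type": "default", "num_epochs": 5},  # constructed lazily, after the model
    "evaluate_on_test": False,
}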
| 42.811728
| 102
| 0.657271
|
ab70296e9ae48542f1c5be3a2c61e9349f61d2a3
| 2,752
|
py
|
Python
|
tests/test_efs/test_access_point_tagging.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_efs/test_access_point_tagging.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
tests/test_efs/test_access_point_tagging.py
|
symroe/moto
|
4e106995af6f2820273528fca8a4e9ee288690a5
|
[
"Apache-2.0"
] | null | null | null |
from os import environ
import boto3
import pytest
from moto import mock_efs
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
environ["AWS_ACCESS_KEY_ID"] = "testing"
environ["AWS_SECRET_ACCESS_KEY"] = "testing"
environ["AWS_SECURITY_TOKEN"] = "testing"
environ["AWS_SESSION_TOKEN"] = "testing"
@pytest.fixture(scope="function")
def efs(aws_credentials): # pylint: disable=unused-argument
with mock_efs():
yield boto3.client("efs", region_name="us-east-1")
@pytest.fixture(scope="function")
def file_system(efs):
create_fs_resp = efs.create_file_system(CreationToken="foobarbaz")
create_fs_resp.pop("ResponseMetadata")
yield create_fs_resp
def test_list_tags_for_resource__without_tags(efs, file_system):
file_system_id = file_system["FileSystemId"]
ap_id = efs.create_access_point(ClientToken="ct", FileSystemId=file_system_id)[
"AccessPointId"
]
resp = efs.list_tags_for_resource(ResourceId=ap_id)
resp.should.have.key("Tags").equals([])
def test_list_tags_for_resource__with_tags(efs, file_system):
file_system_id = file_system["FileSystemId"]
ap_id = efs.create_access_point(
ClientToken="ct",
Tags=[{"Key": "key", "Value": "value"}, {"Key": "Name", "Value": "myname"}],
FileSystemId=file_system_id,
)["AccessPointId"]
resp = efs.list_tags_for_resource(ResourceId=ap_id)
resp.should.have.key("Tags").equals(
[{"Key": "key", "Value": "value"}, {"Key": "Name", "Value": "myname"}]
)
def test_tag_resource(efs, file_system):
file_system_id = file_system["FileSystemId"]
ap_id = efs.create_access_point(ClientToken="ct", FileSystemId=file_system_id)[
"AccessPointId"
]
efs.tag_resource(
ResourceId=ap_id,
Tags=[{"Key": "key", "Value": "value"}, {"Key": "Name", "Value": "myname"}],
)
resp = efs.list_tags_for_resource(ResourceId=ap_id)
resp.should.have.key("Tags").equals(
[{"Key": "key", "Value": "value"}, {"Key": "Name", "Value": "myname"}]
)
def test_untag_resource(efs, file_system):
file_system_id = file_system["FileSystemId"]
ap_id = efs.create_access_point(
ClientToken="ct",
Tags=[{"Key": "key1", "Value": "val1"}],
FileSystemId=file_system_id,
)["AccessPointId"]
efs.tag_resource(
ResourceId=ap_id,
Tags=[{"Key": "key2", "Value": "val2"}, {"Key": "key3", "Value": "val3"}],
)
efs.untag_resource(ResourceId=ap_id, TagKeys=["key2"])
resp = efs.list_tags_for_resource(ResourceId=ap_id)
resp.should.have.key("Tags").equals(
[{"Key": "key1", "Value": "val1"}, {"Key": "key3", "Value": "val3"}]
)
| 28.968421
| 84
| 0.655887
|
82d447ac9801571b871da31b8c53c5660bdc5e03
| 1,126
|
py
|
Python
|
resources/filtros.py
|
brocchirodrigo/CURSO_REST_FLASK
|
928ce11ceaff821749ced93fc51eae002b0619be
|
[
"MIT"
] | null | null | null |
resources/filtros.py
|
brocchirodrigo/CURSO_REST_FLASK
|
928ce11ceaff821749ced93fc51eae002b0619be
|
[
"MIT"
] | null | null | null |
resources/filtros.py
|
brocchirodrigo/CURSO_REST_FLASK
|
928ce11ceaff821749ced93fc51eae002b0619be
|
[
"MIT"
] | null | null | null |
def normalize_path_params(cidade=None,
estrelas_min = 0,
estrelas_max = 5,
diarias_min = 0,
diarias_max = 10000,
limit = 50,
offset = 0, **dados):
if cidade:
return {'estrelas_min': estrelas_min,
'estrelas_max': estrelas_max,
'diarias_min': diarias_min,
'diarias_max': diarias_max,
'cidade': cidade,
'limit': limit,
'offset': offset}
return {'estrelas_min': estrelas_min,
'estrelas_max': estrelas_max,
'diarias_min': diarias_min,
'diarias_max': diarias_max,
'limit': limit,
'offset': offset}
consulta_sem_cidade = "SELECT * FROM hoteis \
WHERE (estrelas >= ? and estrelas <= ?) \
and (diarias >= ? and diarias <= ?) \
LIMIT ? OFFSET ?"
consulta_com_cidade = "SELECT * FROM hoteis \
WHERE (estrelas >= ? and estrelas <= ?) \
and (diarias >= ? and diarias <= ?) \
and cidade = ? \
LIMIT ? OFFSET ?"
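# Illustrative usage (not part of the original file): the dict returned by
# normalize_path_params lists its keys in the same order as the '?' placeholders
# of consulta_com_cidade, so its values can be bound directly. The 'hoteis.db'
# file name is an assumption made only for this sketch.
import sqlite3
def exemplo_consulta():
    parametros = normalize_path_params(cidade='Rio de Janeiro', estrelas_min=4)
    conexao = sqlite3.connect('hoteis.db')
    try:
        cursor = conexao.cursor()
        cursor.execute(consulta_com_cidade, tuple(parametros.values()))
        return cursor.fetchall()
    finally:
        conexao.close()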
| 35.1875
| 47
| 0.493783
|
23151ec4748752177dafea366a703ab9f9c4ff74
| 3,592
|
py
|
Python
|
basic/string1.py
|
Riverfount/estudos
|
03e5339de23b9fc49a2e26174d0241e923adb580
|
[
"MIT"
] | null | null | null |
basic/string1.py
|
Riverfount/estudos
|
03e5339de23b9fc49a2e26174d0241e923adb580
|
[
"MIT"
] | null | null | null |
basic/string1.py
|
Riverfount/estudos
|
03e5339de23b9fc49a2e26174d0241e923adb580
|
[
"MIT"
] | 1
|
2017-03-08T21:46:05.000Z
|
2017-03-08T21:46:05.000Z
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
text = 'Number of donuts: '
return text + str(count) if count < 10 else text + 'many'
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
return '' if len(s) < 2 else s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
return s[0]+s[1:].replace(s[0], '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', 'pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
return b[:2]+a[2:] + ' ' + a[:2]+b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print('donuts')
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print()
print('both_ends')
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print()
print('fix_start')
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print()
print('mix_up')
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| 31.234783
| 80
| 0.658686
|
7e43bb059623d9bfbbd2a4f154fdd7879f70f61f
| 262
|
py
|
Python
|
ex13.py
|
Poethalphyu/python_exercises
|
c5a02923f3198a7715fb02704e8c2a2c6e3eadf5
|
[
"MIT"
] | null | null | null |
ex13.py
|
Poethalphyu/python_exercises
|
c5a02923f3198a7715fb02704e8c2a2c6e3eadf5
|
[
"MIT"
] | null | null | null |
ex13.py
|
Poethalphyu/python_exercises
|
c5a02923f3198a7715fb02704e8c2a2c6e3eadf5
|
[
"MIT"
] | null | null | null |
from sys import argv
# read the WYSS section for how to run this
script, first, second, third = argv
print("The script is called:", script)
print("Your first variable is:", first)
print("Your second variable is:", second)
print("Your thid variable is:", third)
| 29.111111
| 43
| 0.732824
|
4a7886ce39544cc0396b412307f60ac5781e1db9
| 393
|
py
|
Python
|
examples/recommendation.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | 1,072
|
2019-08-02T05:46:21.000Z
|
2022-03-31T07:51:53.000Z
|
examples/recommendation.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | 96
|
2019-08-05T17:27:22.000Z
|
2022-03-03T08:36:57.000Z
|
examples/recommendation.py
|
cenyk1230/cogdl
|
fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce
|
[
"MIT"
] | 299
|
2019-08-08T07:33:10.000Z
|
2022-03-31T09:30:07.000Z
|
import numpy as np
from cogdl import pipeline
data = np.array([[0, 0], [0, 1], [0, 2], [1, 1], [1, 3], [1, 4], [2, 4], [2, 5], [2, 6]])
rec = pipeline("recommendation", model="lightgcn", data=data, max_epoch=10, evaluate_interval=1000, cpu=True)
print(rec([0]))
rec = pipeline("recommendation", model="lightgcn", dataset="ali", max_epoch=1, n_negs=1, evaluate_interval=1000)
print(rec([0]))
| 39.3
| 112
| 0.651399
|
f8cd7dd2b1b066729dd1f8eab29c39a2583e6ab0
| 5,483
|
py
|
Python
|
tests/test_routes.py
|
BenjaminMichaelis/WSU-EECS-UndergraduateResearch
|
453e3b978cda7aeb10dddec1d43e769cdb3451f7
|
[
"MIT"
] | null | null | null |
tests/test_routes.py
|
BenjaminMichaelis/WSU-EECS-UndergraduateResearch
|
453e3b978cda7aeb10dddec1d43e769cdb3451f7
|
[
"MIT"
] | 1
|
2021-12-08T21:48:55.000Z
|
2021-12-08T21:48:55.000Z
|
tests/test_routes.py
|
BenjaminMichaelis/WSU-EECS-UndergraduateResearch
|
453e3b978cda7aeb10dddec1d43e769cdb3451f7
|
[
"MIT"
] | null | null | null |
"""
This file contains the functional tests for the routes.
These tests use GETs and POSTs to different URLs to check for the proper behavior.
Resources:
https://flask.palletsprojects.com/en/1.1.x/testing/
https://www.patricksoftwareblog.com/testing-a-flask-application-using-pytest/
"""
import os
import pytest
from app import create_app, db
from app.Model.models import User, Post, Field
from config import Config
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'sqlite://'
SECRET_KEY = 'bad-bad-key'
WTF_CSRF_ENABLED = False
DEBUG = True
TESTING = True
@pytest.fixture(scope='module')
def test_client():
# create the flask application ; configure the app for tests
flask_app = create_app(config_class=TestConfig)
db.init_app(flask_app)
# Flask provides a way to test your application by exposing the Werkzeug test Client
# and handling the context locals for you.
testing_client = flask_app.test_client()
# Establish an application context before running the tests.
ctx = flask_app.app_context()
ctx.push()
yield testing_client
# this is where the testing happens!
ctx.pop()
def new_user(uname, uemail,passwd):
user = User(username=uname, email=uemail)
user.set_password(passwd)
return user
def init_fields():
# initialize the fields
if Field.query.count() == 0:
fields = ['TestField1', 'TestField2','TestField3','TestField4','TestField5']
for t in fields:
db.session.add(Field(name=t))
db.session.commit()
print(fields)
return None
@pytest.fixture
def init_database():
# Create the database and the database table
db.create_all()
# initialize the tags
init_fields()
#add a user
user1 = new_user(uname='sakire', uemail='sakire@wsu.edu',passwd='1234')
# Insert user data
db.session.add(user1)
# Commit the changes for the users
db.session.commit()
yield # this is where the testing happens!
db.drop_all()
def test_register_page(test_client):
"""
GIVEN a Flask application configured for testing
WHEN the '/register' page is requested (GET)
THEN check that the response is valid
"""
# Create a test client using the Flask application configured for testing
response = test_client.get('/register')
assert response.status_code == 308
# assert b"Register" in response.data
def test_register(test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/register' form is submitted (POST)
THEN check that the response is valid and the database is updated correctly
"""
# Create a test client using the Flask application configured for testing
response = test_client.post('/register/',
data=dict(username='john', firstname='john', lastname='doe', email='john@wsu.edu', phone=1234567890, wsuid = 123456789, password="bad-bad-password",password2="bad-bad-password"),
follow_redirects = True)
assert response.status_code == 200
s = db.session.query(User).filter(User.username=='john')
assert s.first().email == 'john@wsu.edu'
assert s.count() == 1
assert b"Sign In" in response.data
assert b"Please log in to access this page." in response.data
def test_invalidlogin(test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/login' form is submitted (POST) with wrong credentials
THEN check that the response is valid and login is refused
"""
response = test_client.post('/login/',
data=dict(username='sakire', password='12345',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"Invalid username or password" in response.data
def test_login_logout(request,test_client,init_database):
"""
GIVEN a Flask application configured for testing
WHEN the '/login' form is submitted (POST) with correct credentials
THEN check that the response is valid and login is succesfull
"""
response = test_client.post('/login/',
data=dict(username='sakire', password='1234',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"WSU Undergraduate Research Portal" in response.data
response = test_client.get('/logout',
follow_redirects = True)
assert response.status_code == 200
assert b"Sign In" in response.data
def test_postResearch(test_client,init_database):
"""
    GIVEN a Flask application configured for testing, after the user logs in,
    WHEN the '/post/' page is requested (GET)
    THEN check that the response is valid
"""
#login
response = test_client.post('/login/',
data=dict(username='sakire', password='1234',remember_me=False),
follow_redirects = True)
assert response.status_code == 200
assert b"WSU Undergraduate Research Portal" in response.data
#test the "PostSmile" form
response = test_client.get('/post/')
assert response.status_code == 302
| 35.147436
| 205
| 0.661864
|
85e4053b5cd4b308e5b63ce64344375f369df220
| 1,473
|
py
|
Python
|
webots_ros2_tesla/setup.py
|
harshag37/webots_ros2
|
08a061e73e3b88d57cc27b662be0f907d8b9f15b
|
[
"Apache-2.0"
] | 1
|
2021-09-09T13:11:15.000Z
|
2021-09-09T13:11:15.000Z
|
webots_ros2_tesla/setup.py
|
harshag37/webots_ros2
|
08a061e73e3b88d57cc27b662be0f907d8b9f15b
|
[
"Apache-2.0"
] | null | null | null |
webots_ros2_tesla/setup.py
|
harshag37/webots_ros2
|
08a061e73e3b88d57cc27b662be0f907d8b9f15b
|
[
"Apache-2.0"
] | null | null | null |
"""webots_ros2 package setup file."""
from setuptools import setup
package_name = 'webots_ros2_tesla'
data_files = []
data_files.append(('share/ament_index/resource_index/packages', ['resource/' + package_name]))
data_files.append(('share/' + package_name, ['launch/robot_launch.py']))
data_files.append(('share/' + package_name + '/worlds', [
'worlds/tesla_world.wbt', 'worlds/.tesla_world.wbproj',
]))
data_files.append(('share/' + package_name, ['package.xml']))
setup(
name=package_name,
version='1.1.1',
packages=[package_name],
data_files=data_files,
install_requires=['setuptools', 'launch'],
zip_safe=True,
author='Cyberbotics',
author_email='support@cyberbotics.com',
maintainer='Cyberbotics',
maintainer_email='support@cyberbotics.com',
keywords=['ROS', 'Webots', 'Robot', 'Simulation', 'Examples'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Tesla ROS2 interface for Webots.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'tesla_driver = webots_ros2_tesla.tesla_driver:main',
'lane_follower = webots_ros2_tesla.lane_follower:main'
],
'launch.frontend.launch_extension': ['launch_ros = launch_ros']
}
)
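# Illustrative note (not part of the original file): in an ament_python package
# like this one, each console_scripts entry above becomes an executable node,
# e.g. `ros2 run webots_ros2_tesla tesla_driver`, and the installed
# launch/robot_launch.py can be started with
# `ros2 launch webots_ros2_tesla robot_launch.py`.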
| 32.733333
| 94
| 0.669382
|
0cf45215ae7530cf7c95237821a6f9303c3afce5
| 5,503
|
py
|
Python
|
tests/system.py
|
renovate-bot/python-translate
|
1ab82aa12ecd4bbb0195e4c39ca476b944cdddbc
|
[
"Apache-2.0"
] | 70
|
2020-03-03T04:02:23.000Z
|
2022-03-29T20:09:22.000Z
|
tests/system.py
|
renovate-bot/python-translate
|
1ab82aa12ecd4bbb0195e4c39ca476b944cdddbc
|
[
"Apache-2.0"
] | 130
|
2020-01-31T20:17:09.000Z
|
2022-03-24T17:01:21.000Z
|
tests/system.py
|
renovate-bot/python-translate
|
1ab82aa12ecd4bbb0195e4c39ca476b944cdddbc
|
[
"Apache-2.0"
] | 47
|
2020-01-31T21:25:59.000Z
|
2022-03-31T20:52:21.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import unittest
from google.cloud import translate_v2
from google.cloud import translate
class Config(object):
"""Run-time configuration to be modified at set-up.
This is a mutable stand-in to allow test set-up to modify
global state.
"""
CLIENT_V2 = None
CLIENT_V3 = None
location = "global"
project_id = os.environ["PROJECT_ID"]
use_mtls = os.environ.get("GOOGLE_API_USE_MTLS_ENDPOINT", "never")
def setUpModule():
Config.CLIENT_V2 = translate_v2.Client()
Config.CLIENT_V3 = translate.TranslationServiceClient()
# Only v3/v3beta1 clients have mTLS support, so we need to skip all the
# v2 client tests for mTLS testing.
skip_for_mtls = pytest.mark.skipif(
Config.use_mtls == "always", reason="Skip the v2 client test for mTLS testing"
)
class TestTranslate(unittest.TestCase):
@skip_for_mtls
def test_get_languages(self):
result = Config.CLIENT_V2.get_languages()
# There are **many** more than 10 languages.
self.assertGreater(len(result), 10)
lang_map = {val["language"]: val["name"] for val in result}
self.assertEqual(lang_map["en"], "English")
self.assertEqual(lang_map["ja"], "Japanese")
self.assertEqual(lang_map["lv"], "Latvian")
self.assertEqual(lang_map["zu"], "Zulu")
@skip_for_mtls
def test_detect_language(self):
values = ["takoy", "fa\xe7ade", "s'il vous plait"]
detections = Config.CLIENT_V2.detect_language(values)
self.assertEqual(len(values), len(detections))
self.assertEqual(detections[0]["language"], "ru")
self.assertEqual(detections[1]["language"], "fr")
self.assertEqual(detections[2]["language"], "fr")
@skip_for_mtls
def test_translate(self):
values = ["petnaest", "dek kvin", "Me llamo Jeff", "My name is Jeff"]
translations = Config.CLIENT_V2.translate(
values, target_language="de", model="nmt"
)
self.assertEqual(len(values), len(translations))
self.assertEqual(translations[0]["detectedSourceLanguage"].lower(), "hr")
self.assertEqual(translations[0]["translatedText"].lower(), "fünfzehn")
self.assertEqual(translations[1]["detectedSourceLanguage"], "eo")
self.assertEqual(translations[1]["translatedText"].lower(), "fünfzehn")
self.assertEqual(translations[2]["detectedSourceLanguage"], "es")
es_translation = translations[2]["translatedText"].lower()
self.assertTrue(
es_translation == "ich heiße jeff" or es_translation == "mein name ist jeff"
)
self.assertEqual(translations[3]["detectedSourceLanguage"], "en")
self.assertEqual(
translations[3]["translatedText"].lower(), "mein name ist jeff"
)
def test_get_languages_v3(self):
parent = f"projects/{Config.project_id}/locations/{Config.location}"
result = Config.CLIENT_V3.get_supported_languages(parent=parent)
languages = [lang.language_code for lang in result.languages]
self.assertGreater(
len(languages), 10
) # There are **many** more than 10 languages.
self.assertIn("zu", languages) # Zulu is supported
self.assertIn("fr", languages) # English is supported
self.assertIn("ga", languages) # Irish is supported
def test_detect_language_v3(self):
parent = f"projects/{Config.project_id}/locations/{Config.location}"
value = "s'il vous plait"
response = Config.CLIENT_V3.detect_language(
request={"parent": parent, "content": value, "mime_type": "text/plain"}
)
languages = [detection.language_code for detection in response.languages]
self.assertEqual(languages[0], "fr")
def test_translate_v3(self):
parent = f"projects/{Config.project_id}/locations/{Config.location}"
values = ["petnaest", "dek kvin", "Me llamo Jeff", "My name is Jeff"]
translations = Config.CLIENT_V3.translate_text(
parent=parent, contents=values, target_language_code="de"
)
results_map = {
result.detected_language_code: result.translated_text
for result in translations.translations
}
self.assertEqual(len(values), len(results_map))
self.assertIn("hr", results_map.keys())
self.assertIn("eo", results_map.keys())
self.assertIn("es", results_map.keys())
self.assertIn("en", results_map.keys())
self.assertEqual(results_map["hr"].lower(), "fünfzehn")
self.assertEqual(results_map["eo"].lower(), "fünfzehn")
es_translation = results_map["es"].lower()
self.assertTrue(
es_translation == "ich heiße jeff" or es_translation == "mein name ist jeff"
)
self.assertEqual(results_map["en"].lower(), "mein name ist jeff")
| 37.951724
| 88
| 0.665273
|
467685564e51beb48e7ff580c7706f7ef65eb98f
| 13,350
|
py
|
Python
|
congress/synchronizer/policy_rule_synchronizer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | null | null | null |
congress/synchronizer/policy_rule_synchronizer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | null | null | null |
congress/synchronizer/policy_rule_synchronizer.py
|
mail2nsrajesh/congress
|
a724dfb59c43a5e88e2b03e714a5f962d6976762
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 NEC Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import eventlet
from futurist import periodics
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from congress.datalog import base
from congress.datalog import compile
from congress.db import datasources
from congress.db import db_policy_rules
LOG = logging.getLogger(__name__)
SYNCHRONIZER_SERVICE_ID = '_policy_rule_synchronizer'
class PolicyRuleSynchronizer(object):
def __init__(self, service_obj, node):
self.name = SYNCHRONIZER_SERVICE_ID
self.engine = service_obj
self.sync_thread = None
self.periodic_tasks = None
self.node = node
def start(self):
callables = [(self.synchronize_all_policies, None, {}),
(self.synchronize_rules, None, {})]
self.periodic_tasks = periodics.PeriodicWorker(callables)
self.sync_thread = eventlet.spawn_n(self.periodic_tasks.start)
LOG.info("started policy-rule synchronizer on node %s",
self.node.node_id)
def stop(self):
if self.periodic_tasks:
self.periodic_tasks.stop()
self.periodic_tasks.wait()
self.periodic_tasks = None
if self.sync_thread is not None:
eventlet.greenthread.kill(self.sync_thread)
self.sync_thread = None
def _register_datasource_with_pe(self, ds_name):
"""create datasource policy in PE for newly created datasource."""
if not self.node.is_valid_service(ds_name):
# datasource service not up, nothing to register
return
# Get the datasource schema to sync the schema with PE
schema = self.node.invoke_service_rpc(ds_name, 'get_datasource_schema',
{'source_id': ds_name})
self.engine.initialize_datasource(ds_name, schema)
LOG.debug("registered datasource '%s' with PE on node %s", ds_name,
self.node.node_id)
def _sync_datasource_policies(self):
added = 0
removed = 0
db_datasources = [ds['name'] for ds in self.node.get_datasources()]
ds_policies = [p['name'] for p in
self._get_engine_policies(datasource=True)]
for ds in db_datasources:
# check if ds is registered with PE
if ds not in ds_policies:
self._register_datasource_with_pe(ds)
added = added + 1
        # get the policies registered with PE, but not in the database
remove_policies = list(set(ds_policies) - set(db_datasources))
for p in remove_policies:
self.engine.delete_policy(p)
removed = removed+1
LOG.debug("datasource policies synchronized, added %d removed %d ",
added, removed)
def _get_engine_policies(self, datasource=False):
all_policies = [self.engine.policy_object(n) for n in
self.engine.policy_names()]
dpolicies = [p for p in all_policies
if p.kind == base.DATASOURCE_POLICY_TYPE]
epolicies = list(set(all_policies) - set(dpolicies))
policies = dpolicies if datasource else epolicies
active_policies = []
for policy in policies:
active_policies.append({'id': policy.id,
'name': policy.name,
'abbreviation': policy.abbr,
'description': policy.desc,
'owner_id': policy.owner,
'kind': policy.kind})
return active_policies
@lockutils.synchronized('congress_synchronize_policies')
def sync_one_policy(self, name, datasource=True):
"""Synchronize single policy with DB.
:param name: policy name to be synchronized
:param datasource: True, to sync a datasource policy
"""
LOG.info("sync %s policy with DB", name)
if datasource:
policy_object = datasources.get_datasource_by_name(name)
if policy_object is not None:
if name not in self.engine.policy_names():
self._register_datasource_with_pe(name)
return
policy_object = db_policy_rules.get_policy_by_name(name)
if policy_object is None:
if name in self.engine.policy_names():
self.engine.delete_policy(name)
LOG.info("policy %s deleted by synchronizer", name)
return
p = policy_object.to_dict()
if name not in self.engine.policy_names():
self.engine.create_policy(
p['name'], id_=p['id'], abbr=p['abbreviation'],
kind=p['kind'], desc=p['description'],
owner=p['owner_id'])
LOG.debug("policy %s added by synchronizer", name)
elif p['id'] != self.engine.policy_object(name).id:
# if same name but not identical attributes
# replace by new policy obj according to DB
self.engine.delete_policy(name)
self.engine.create_policy(
p['name'], id_=p['id'], abbr=p['abbreviation'],
kind=p['kind'], desc=p['description'],
owner=p['owner_id'])
LOG.debug("synchronizer, policy replaced %s", name)
@periodics.periodic(spacing=cfg.CONF.datasource_sync_period)
@lockutils.synchronized('congress_synchronize_policies')
def synchronize_all_policies(self):
"""Function to synchronize im-mem policies with DB"""
added = 0
removed = 0
try:
db_policies = [p.to_dict() for p in db_policy_rules.get_policies()]
active_policies = self._get_engine_policies()
# Delete engine policies which are not in DB
for p in active_policies:
if p not in db_policies:
LOG.debug("removing policy %s", str(p))
self.engine.delete_policy(p['id'])
removed = removed + 1
# Add policies to PE, which are in DB
for p in db_policies:
if p not in active_policies:
LOG.debug("adding policy %s", str(p))
self.engine.create_policy(p['name'], id_=p['id'],
abbr=p['abbreviation'],
kind=p['kind'],
desc=p['description'],
owner=p['owner_id'])
added = added + 1
LOG.info("engine policies synchronized, added %d removed %d ",
added, removed)
# synchronize datasource policies
self._sync_datasource_policies()
LOG.info("completed synchronization of policies")
except Exception:
LOG.exception("Exception occurred in policy synchronizer periodic"
"task on node %s", self.node.node_id)
return
@periodics.periodic(spacing=cfg.CONF.datasource_sync_period)
@lockutils.synchronized('congress_synchronize_rules')
def synchronize_rules(self, db_session=None):
self.synchronize_rules_nonlocking(db_session=db_session)
def synchronize_rules_nonlocking(self, db_session=None):
LOG.debug("Synchronizing rules on node %s", self.node.node_id)
try:
# Read rules from DB.
configured_rules = []
configured_facts = []
for r in db_policy_rules.get_policy_rules(session=db_session):
if ':-' in r.rule: # if rule has body
configured_rules.append({'rule': r.rule,
'id': r.id,
'comment': r.comment,
'name': r.name,
'policy_name': r.policy_name})
                else:  # head-only rule, i.e., a fact
configured_facts.append(
{'rule': self.engine.parse1(r.rule).pretty_str(),
# note:parse to remove effect of extraneous formatting
'policy_name': r.policy_name})
# Read rules from engine
policies = {n: self.engine.policy_object(n) for n in
self.engine.policy_names()}
active_policy_rules = []
active_policy_facts = []
for policy_name, policy in policies.items():
if policy.kind != base.DATASOURCE_POLICY_TYPE:
for active_rule in policy.content():
# FIXME: This assumes r.original_str is None iff
# r is a head-only rule (fact). This works in
# non-recursive policy but not in recursive policies
if active_rule.original_str is None:
active_policy_facts.append(
{'rule': str(active_rule.head),
'policy_name': policy_name})
else:
active_policy_rules.append(
{'rule': active_rule.original_str,
'id': active_rule.id,
'comment': active_rule.comment,
'name': active_rule.name,
'policy_name': policy_name})
# ALEX: the Rule object does not have fields like the rule-string
# or id or comment. We can add those fields to the Rule object,
# as long as we don't add them to the Fact because there are many
# fact instances. If a user tries to create a lot of Rules, they
# are probably doing something wrong and should use a datasource
# driver instead.
changes = []
# add configured rules
for r in configured_rules:
if r not in active_policy_rules:
LOG.debug("adding rule %s", str(r))
parsed_rule = self.engine.parse1(r['rule'])
parsed_rule.set_id(r['id'])
parsed_rule.set_name(r['name'])
parsed_rule.set_comment(r['comment'])
parsed_rule.set_original_str(r['rule'])
event = compile.Event(formula=parsed_rule,
insert=True,
target=r['policy_name'])
changes.append(event)
# add configured facts
for r in configured_facts:
if r not in active_policy_facts:
LOG.debug("adding rule %s", str(r))
parsed_rule = self.engine.parse1(r['rule'])
event = compile.Event(formula=parsed_rule,
insert=True,
target=r['policy_name'])
changes.append(event)
# remove active rules not configured
for r in active_policy_rules:
if r not in configured_rules:
LOG.debug("removing rule %s", str(r))
parsed_rule = self.engine.parse1(r['rule'])
parsed_rule.set_id(r['id'])
parsed_rule.set_name(r['name'])
parsed_rule.set_comment(r['comment'])
parsed_rule.set_original_str(r['rule'])
event = compile.Event(formula=parsed_rule,
insert=False,
target=r['policy_name'])
changes.append(event)
# remove active facts not configured
for r in active_policy_facts:
if r not in configured_facts:
LOG.debug("removing rule %s", str(r))
parsed_rule = self.engine.parse1(r['rule'])
event = compile.Event(formula=parsed_rule,
insert=False,
target=r['policy_name'])
changes.append(event)
permitted, changes = self.engine.process_policy_update(changes)
LOG.info("synchronize_rules, permitted %d, made %d changes on "
"node %s", permitted, len(changes), self.node.node_id)
except Exception:
LOG.exception("synchronizing rules failed")
| 44.798658
| 79
| 0.546442
|
cf2b9a838100bd009f2a9928afaefe6424b11199
| 4,879
|
py
|
Python
|
test/STAvln_tb/test/t_STAvln_tb.py
|
hnikolov/pihdf
|
9a0d2add059db1ee90805e2124beff1fb5185fae
|
[
"MIT"
] | 2
|
2016-09-25T00:08:47.000Z
|
2016-10-09T10:09:55.000Z
|
test/STAvln_tb/test/t_STAvln_tb.py
|
hnikolov/pihdf
|
9a0d2add059db1ee90805e2124beff1fb5185fae
|
[
"MIT"
] | null | null | null |
test/STAvln_tb/test/t_STAvln_tb.py
|
hnikolov/pihdf
|
9a0d2add059db1ee90805e2124beff1fb5185fae
|
[
"MIT"
] | null | null | null |
import myhdl
import pihdf
from pihdf import Testable
import os, sys
sys.path.append(os.path.dirname(__file__) + "/../..")
from STAvln_tb.STAvln_tb import STAvln_tb
class t_STAvln_tb(Testable):
'''|
| Automatically generated. Do not modify this file.
|________'''
pihdf.head("T E S T S")
pihdf.info("Using myhdl version " + myhdl.__version__)
pihdf.info("Using pihdf version " + pihdf.__version__ + '\n')
def __init__(self):
# call base class constructor
Testable.__init__(self)
self.test_path = os.path.dirname(__file__)
self.cond_rx16 = []
self.stim_rx16 = []
self.cond_tx16 = []
self.res_tx16 = []
self.cond_rx32 = []
self.stim_rx32 = []
self.cond_tx32 = []
self.res_tx32 = []
self.cond_rx64 = []
self.stim_rx64 = []
self.cond_tx64 = []
self.res_tx64 = []
self.cond_ipg_rx16 = []
self.res_ipg_rx16 = []
self.cond_ipg_tx16 = []
self.res_ipg_tx16 = []
self.cond_sim_end = {}
self.tst_data = { "cond_rx16":self.cond_rx16,\
"stim_rx16":self.stim_rx16,\
"cond_tx16":self.cond_tx16,\
"res_tx16":self.res_tx16,\
"cond_rx32":self.cond_rx32,\
"stim_rx32":self.stim_rx32,\
"cond_tx32":self.cond_tx32,\
"res_tx32":self.res_tx32,\
"cond_rx64":self.cond_rx64,\
"stim_rx64":self.stim_rx64,\
"cond_tx64":self.cond_tx64,\
"res_tx64":self.res_tx64,\
"cond_ipg_rx16":self.cond_ipg_rx16,\
"res_ipg_rx16":self.res_ipg_rx16,\
"cond_ipg_tx16":self.cond_ipg_tx16,\
"res_ipg_tx16":self.res_ipg_tx16,\
"cond_sim_end": self.cond_sim_end }
self.ref_tx16 = []
self.ref_tx32 = []
self.ref_tx64 = []
self.ref_ipg_rx16 = []
self.ref_ipg_tx16 = []
self.ref_data = { "tx16":(self.ref_tx16, self.res_tx16),\
"tx32":(self.ref_tx32, self.res_tx32),\
"tx64":(self.ref_tx64, self.res_tx64),\
"ipg_rx16":(self.ref_ipg_rx16, self.res_ipg_rx16),\
"ipg_tx16":(self.ref_ipg_tx16, self.res_ipg_tx16) }
# Automatically executed BEFORE every test case
def setUp(self):
print ""
# Automatically executed AFTER every test case
def tearDown(self):
print ""
self.cond_rx16 = []
self.stim_rx16 = []
self.cond_tx16 = []
self.res_tx16 = []
self.ref_tx16 = []
self.cond_rx32 = []
self.stim_rx32 = []
self.cond_tx32 = []
self.res_tx32 = []
self.ref_tx32 = []
self.cond_rx64 = []
self.stim_rx64 = []
self.cond_tx64 = []
self.res_tx64 = []
self.ref_tx64 = []
self.cond_ipg_rx16 = []
self.res_ipg_rx16 = []
self.ref_ipg_rx16 = []
self.cond_ipg_tx16 = []
self.res_ipg_tx16 = []
self.ref_ipg_tx16 = []
# Data has been previously generated and written to files
def use_data_from_files(self):
self.stim_rx16.append({"file" : self.test_path + "/vectors/rx16.tvr"})
self.res_tx16.append({"file" : self.test_path + "/vectors/my_tx16.tvr"})
self.ref_tx16.append({"file" : self.test_path + "/vectors/tx16.tvr"})
self.stim_rx32.append({"file" : self.test_path + "/vectors/rx32.tvr"})
self.res_tx32.append({"file" : self.test_path + "/vectors/my_tx32.tvr"})
self.ref_tx32.append({"file" : self.test_path + "/vectors/tx32.tvr"})
self.stim_rx64.append({"file" : self.test_path + "/vectors/rx64.tvr"})
self.res_tx64.append({"file" : self.test_path + "/vectors/my_tx64.tvr"})
self.ref_tx64.append({"file" : self.test_path + "/vectors/tx64.tvr"})
self.res_ipg_rx16.append({"file" : self.test_path + "/vectors/my_ipg_rx16.tvr"})
self.ref_ipg_rx16.append({"file" : self.test_path + "/vectors/ipg_rx16.tvr"})
self.res_ipg_tx16.append({"file" : self.test_path + "/vectors/my_ipg_tx16.tvr"})
self.ref_ipg_tx16.append({"file" : self.test_path + "/vectors/ipg_tx16.tvr"})
self.checkfiles = True
self.run_it()
# Run the simulation and check the results
def run_it(self, checkfiles=False):
self.check_config("STAvln_tb")
STAvln_tb_dut = STAvln_tb(IMPL=self.models)
STAvln_tb_dut.Simulate(tb_config=self.tb_config, tst_data=self.tst_data, verbose=self.verbose)
STAvln_tb_dut.clean()
self.check_results()
| 37.530769
| 102
| 0.556466
|
5eae8527ec63d61d81be8b0f5582be7cef4cbd81
| 6,944
|
py
|
Python
|
lib/googlecloudsdk/api_lib/ai/tensorboard_experiments/client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/api_lib/ai/tensorboard_experiments/client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/ai/tensorboard_experiments/client.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for AI Platform Tensorboard experiments API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.api_lib.util import common_args
from googlecloudsdk.command_lib.ai import constants
from googlecloudsdk.command_lib.ai import errors
from googlecloudsdk.command_lib.util.args import labels_util
class TensorboardExperimentsClient(object):
"""High-level client for the AI Platform Tensorboard experiment surface."""
def __init__(self,
client=None,
messages=None,
version=constants.BETA_VERSION):
self.client = client or apis.GetClientInstance(
constants.AI_PLATFORM_API_NAME,
constants.AI_PLATFORM_API_VERSION[version])
self.messages = messages or self.client.MESSAGES_MODULE
self._service = self.client.projects_locations_tensorboards_experiments
self._version = version
def Create(self, tensorboard_ref, args):
if self._version == constants.ALPHA_VERSION:
return self.CreateAlpha(tensorboard_ref, args)
else:
return self.CreateBeta(tensorboard_ref, args)
def CreateBeta(self, tensorboard_ref, args):
"""Create a new Tensorboard experiment."""
labels = labels_util.ParseCreateArgs(
args, self.messages.GoogleCloudAiplatformV1beta1TensorboardExperiment
.LabelsValue)
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsCreateRequest(
parent=tensorboard_ref.RelativeName(),
googleCloudAiplatformV1beta1TensorboardExperiment=self.messages
.GoogleCloudAiplatformV1beta1TensorboardExperiment(
displayName=args.display_name,
description=args.description,
labels=labels),
tensorboardExperimentId=args.tensorboard_experiment_id)
return self._service.Create(request)
def CreateAlpha(self, tensorboard_ref, args):
"""Create a new Tensorboard experiment."""
labels = labels_util.ParseCreateArgs(
args, self.messages.GoogleCloudAiplatformV1alpha1TensorboardExperiment
.LabelsValue)
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsCreateRequest(
parent=tensorboard_ref.RelativeName(),
googleCloudAiplatformV1alpha1TensorboardExperiment=self.messages
.GoogleCloudAiplatformV1alpha1TensorboardExperiment(
displayName=args.display_name,
description=args.description,
labels=labels),
tensorboardExperimentId=args.tensorboard_experiment_id)
return self._service.Create(request)
def List(self, tensorboard_ref, limit=1000, page_size=50, sort_by=None):
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsListRequest(
parent=tensorboard_ref.RelativeName(),
orderBy=common_args.ParseSortByArg(sort_by))
return list_pager.YieldFromList(
self._service,
request,
field='tensorboardExperiments',
batch_size_attribute='pageSize',
batch_size=page_size,
limit=limit)
def Get(self, tensorboard_exp_ref):
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsGetRequest(
name=tensorboard_exp_ref.RelativeName())
return self._service.Get(request)
def Delete(self, tensorboard_exp_ref):
request = (
self.messages
.AiplatformProjectsLocationsTensorboardsExperimentsDeleteRequest(
name=tensorboard_exp_ref.RelativeName()))
return self._service.Delete(request)
def Patch(self, tensorboard_exp_ref, args):
if self._version == constants.ALPHA_VERSION:
return self.PatchAlpha(tensorboard_exp_ref, args)
else:
return self.PatchBeta(tensorboard_exp_ref, args)
def PatchBeta(self, tensorboard_exp_ref, args):
"""Update a Tensorboard experiment."""
tensorboard_exp = (
self.messages.GoogleCloudAiplatformV1beta1TensorboardExperiment())
update_mask = []
def GetLabels():
return self.Get(tensorboard_exp_ref).labels
labels_update = labels_util.ProcessUpdateArgsLazy(
args, self.messages.GoogleCloudAiplatformV1beta1TensorboardExperiment
.LabelsValue, GetLabels)
if labels_update.needs_update:
tensorboard_exp.labels = labels_update.labels
update_mask.append('labels')
if args.display_name is not None:
tensorboard_exp.displayName = args.display_name
update_mask.append('display_name')
if args.description is not None:
tensorboard_exp.description = args.description
update_mask.append('description')
if not update_mask:
raise errors.NoFieldsSpecifiedError('No updates requested.')
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsPatchRequest(
name=tensorboard_exp_ref.RelativeName(),
googleCloudAiplatformV1beta1TensorboardExperiment=tensorboard_exp,
updateMask=','.join(update_mask))
return self._service.Patch(request)
def PatchAlpha(self, tensorboard_exp_ref, args):
"""Update a Tensorboard experiment."""
tensorboard_exp = (
self.messages.GoogleCloudAiplatformV1alpha1TensorboardExperiment())
update_mask = []
def GetLabels():
return self.Get(tensorboard_exp_ref).labels
labels_update = labels_util.ProcessUpdateArgsLazy(
args, self.messages.GoogleCloudAiplatformV1alpha1TensorboardExperiment
.LabelsValue, GetLabels)
if labels_update.needs_update:
tensorboard_exp.labels = labels_update.labels
update_mask.append('labels')
if args.display_name is not None:
tensorboard_exp.displayName = args.display_name
update_mask.append('display_name')
if args.description is not None:
tensorboard_exp.description = args.description
update_mask.append('description')
if not update_mask:
raise errors.NoFieldsSpecifiedError('No updates requested.')
request = self.messages.AiplatformProjectsLocationsTensorboardsExperimentsPatchRequest(
name=tensorboard_exp_ref.RelativeName(),
googleCloudAiplatformV1alpha1TensorboardExperiment=tensorboard_exp,
updateMask=','.join(update_mask))
return self._service.Patch(request)
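# Illustrative sketch (not part of the original module): the Patch* methods
# above follow the usual field-mask convention: only the fields listed in
# updateMask are modified on the server. A minimal stand-alone illustration of
# that convention (no Google Cloud calls involved):
def _example_update_mask(display_name=None, description=None):
    update_mask = []
    if display_name is not None:
        update_mask.append('display_name')
    if description is not None:
        update_mask.append('description')
    return ','.join(update_mask)  # e.g. 'display_name' when only display_name is set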
| 39.908046
| 92
| 0.751584
|
6575731d6bf724bf7d4e0dcef7249d963039688c
| 9,059
|
py
|
Python
|
markdown_it/rules_block/blockquote.py
|
ExecutableBookProject/markdown-it-py
|
53084e1ffa82323e37fe2d17a1b53d1dc66e5afd
|
[
"MIT"
] | 12
|
2020-03-26T08:00:43.000Z
|
2020-04-23T09:10:36.000Z
|
markdown_it/rules_block/blockquote.py
|
sthagen/executablebooks-markdown-it-py
|
53084e1ffa82323e37fe2d17a1b53d1dc66e5afd
|
[
"MIT"
] | 9
|
2020-03-25T11:36:16.000Z
|
2020-04-23T18:07:16.000Z
|
markdown_it/rules_block/blockquote.py
|
sthagen/executablebooks-markdown-it-py
|
53084e1ffa82323e37fe2d17a1b53d1dc66e5afd
|
[
"MIT"
] | 1
|
2020-04-01T16:12:38.000Z
|
2020-04-01T16:12:38.000Z
|
# Block quotes
from __future__ import annotations
import logging
from ..common.utils import isSpace
from .state_block import StateBlock
LOGGER = logging.getLogger(__name__)
def blockquote(state: StateBlock, startLine: int, endLine: int, silent: bool):
LOGGER.debug(
"entering blockquote: %s, %s, %s, %s", state, startLine, endLine, silent
)
oldLineMax = state.lineMax
pos = state.bMarks[startLine] + state.tShift[startLine]
max = state.eMarks[startLine]
# if it's indented more than 3 spaces, it should be a code block
if (state.sCount[startLine] - state.blkIndent) >= 4:
return False
# check the block quote marker
if state.srcCharCode[pos] != 0x3E: # /* > */
return False
pos += 1
# we know that it's going to be a valid blockquote,
# so no point trying to find the end of it in silent mode
if silent:
return True
# set offset past spaces and ">"
initial = offset = state.sCount[startLine] + 1
try:
second_char_code: int | None = state.srcCharCode[pos]
except IndexError:
second_char_code = None
# skip one optional space after '>'
if second_char_code == 0x20: # /* space */
# ' > test '
# ^ -- position start of line here:
pos += 1
initial += 1
offset += 1
adjustTab = False
spaceAfterMarker = True
elif second_char_code == 0x09: # /* tab */
spaceAfterMarker = True
if (state.bsCount[startLine] + offset) % 4 == 3:
# ' >\t test '
# ^ -- position start of line here (tab has width==1)
pos += 1
initial += 1
offset += 1
adjustTab = False
else:
# ' >\t test '
# ^ -- position start of line here + shift bsCount slightly
# to make extra space appear
adjustTab = True
else:
spaceAfterMarker = False
oldBMarks = [state.bMarks[startLine]]
state.bMarks[startLine] = pos
while pos < max:
ch = state.srcCharCode[pos]
if isSpace(ch):
if ch == 0x09: # / tab /
offset += (
4
- (offset + state.bsCount[startLine] + (1 if adjustTab else 0)) % 4
)
else:
offset += 1
else:
break
pos += 1
oldBSCount = [state.bsCount[startLine]]
state.bsCount[startLine] = (
state.sCount[startLine] + 1 + (1 if spaceAfterMarker else 0)
)
lastLineEmpty = pos >= max
oldSCount = [state.sCount[startLine]]
state.sCount[startLine] = offset - initial
oldTShift = [state.tShift[startLine]]
state.tShift[startLine] = pos - state.bMarks[startLine]
terminatorRules = state.md.block.ruler.getRules("blockquote")
oldParentType = state.parentType
state.parentType = "blockquote"
# Search the end of the block
#
# Block ends with either:
# 1. an empty line outside:
# ```
# > test
#
# ```
# 2. an empty line inside:
# ```
# >
# test
# ```
# 3. another tag:
# ```
# > test
# - - -
# ```
# for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
nextLine = startLine + 1
while nextLine < endLine:
# check if it's outdented, i.e. it's inside list item and indented
# less than said list item:
#
# ```
# 1. anything
# > current blockquote
# 2. checking this line
# ```
isOutdented = state.sCount[nextLine] < state.blkIndent
pos = state.bMarks[nextLine] + state.tShift[nextLine]
max = state.eMarks[nextLine]
if pos >= max:
# Case 1: line is not inside the blockquote, and this line is empty.
break
evaluatesTrue = state.srcCharCode[pos] == 0x3E and not isOutdented # /* > */
pos += 1
if evaluatesTrue:
# This line is inside the blockquote.
# set offset past spaces and ">"
initial = offset = state.sCount[nextLine] + 1
try:
next_char: int | None = state.srcCharCode[pos]
except IndexError:
next_char = None
# skip one optional space after '>'
if next_char == 0x20: # /* space */
# ' > test '
# ^ -- position start of line here:
pos += 1
initial += 1
offset += 1
adjustTab = False
spaceAfterMarker = True
elif next_char == 0x09: # /* tab */
spaceAfterMarker = True
if (state.bsCount[nextLine] + offset) % 4 == 3:
# ' >\t test '
# ^ -- position start of line here (tab has width==1)
pos += 1
initial += 1
offset += 1
adjustTab = False
else:
# ' >\t test '
# ^ -- position start of line here + shift bsCount slightly
# to make extra space appear
adjustTab = True
else:
spaceAfterMarker = False
oldBMarks.append(state.bMarks[nextLine])
state.bMarks[nextLine] = pos
while pos < max:
ch = state.srcCharCode[pos]
if isSpace(ch):
if ch == 0x09:
offset += (
4
- (
offset
+ state.bsCount[nextLine]
+ (1 if adjustTab else 0)
)
% 4
)
else:
offset += 1
else:
break
pos += 1
lastLineEmpty = pos >= max
oldBSCount.append(state.bsCount[nextLine])
state.bsCount[nextLine] = (
state.sCount[nextLine] + 1 + (1 if spaceAfterMarker else 0)
)
oldSCount.append(state.sCount[nextLine])
state.sCount[nextLine] = offset - initial
oldTShift.append(state.tShift[nextLine])
state.tShift[nextLine] = pos - state.bMarks[nextLine]
nextLine += 1
continue
# Case 2: line is not inside the blockquote, and the last line was empty.
if lastLineEmpty:
break
# Case 3: another tag found.
terminate = False
for terminatorRule in terminatorRules:
if terminatorRule(state, nextLine, endLine, True):
terminate = True
break
if terminate:
# Quirk to enforce "hard termination mode" for paragraphs;
# normally if you call `tokenize(state, startLine, nextLine)`,
# paragraphs will look below nextLine for paragraph continuation,
# but if blockquote is terminated by another tag, they shouldn't
state.lineMax = nextLine
if state.blkIndent != 0:
# state.blkIndent was non-zero, we now set it to zero,
# so we need to re-calculate all offsets to appear as
# if indent wasn't changed
oldBMarks.append(state.bMarks[nextLine])
oldBSCount.append(state.bsCount[nextLine])
oldTShift.append(state.tShift[nextLine])
oldSCount.append(state.sCount[nextLine])
state.sCount[nextLine] -= state.blkIndent
break
oldBMarks.append(state.bMarks[nextLine])
oldBSCount.append(state.bsCount[nextLine])
oldTShift.append(state.tShift[nextLine])
oldSCount.append(state.sCount[nextLine])
# A negative indentation means that this is a paragraph continuation
#
state.sCount[nextLine] = -1
nextLine += 1
oldIndent = state.blkIndent
state.blkIndent = 0
token = state.push("blockquote_open", "blockquote", 1)
token.markup = ">"
token.map = lines = [startLine, 0]
state.md.block.tokenize(state, startLine, nextLine)
token = state.push("blockquote_close", "blockquote", -1)
token.markup = ">"
state.lineMax = oldLineMax
state.parentType = oldParentType
lines[1] = state.line
# Restore original tShift; this might not be necessary since the parser
# has already been here, but just to make sure we can do that.
for i, item in enumerate(oldTShift):
state.bMarks[i + startLine] = oldBMarks[i]
state.tShift[i + startLine] = item
state.sCount[i + startLine] = oldSCount[i]
state.bsCount[i + startLine] = oldBSCount[i]
state.blkIndent = oldIndent
return True
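# Illustrative usage (not part of the original module): the rule above is what
# MarkdownIt's block parser runs for '>' lines. The import is local to avoid a
# circular import at module load time, and the expected HTML is approximate.
def _example_render():  # pragma: no cover - illustration only
    from markdown_it import MarkdownIt
    md = MarkdownIt()
    # Expected to produce roughly:
    # <blockquote>\n<p>first line\nsecond line</p>\n</blockquote>
    return md.render("> first line\n> second line\n")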
| 30.196667
| 87
| 0.520035
|
b1b0fd73c118fe0527e5418fbc61bf5c7893bc9f
| 3,333
|
py
|
Python
|
sql_to_django/lib.py
|
rimi-dev/sql_to_orm
|
ef58cac457182fa8d30c729d15dd3fbc9347f2fc
|
[
"MIT"
] | 1
|
2021-03-11T11:52:14.000Z
|
2021-03-11T11:52:14.000Z
|
sql_to_django/lib.py
|
rimi-dev/sql_to_orm
|
ef58cac457182fa8d30c729d15dd3fbc9347f2fc
|
[
"MIT"
] | 8
|
2020-06-03T06:15:39.000Z
|
2021-09-22T19:09:18.000Z
|
sql_to_django/lib.py
|
rimi-dev/sql_to_orm
|
ef58cac457182fa8d30c729d15dd3fbc9347f2fc
|
[
"MIT"
] | null | null | null |
import re
from common.lib import list_whitespace_remove, list_to_dict
class Table:
"""
Table class
Author : rimi
Date : 2020. 05. 27
Description : get/set main table, get/set joined tables
"""
main_named_table = ''
main_table = ''
joined_table = []
@classmethod
def update_main_table(cls, table):
if len(table) > 1:
cls.main_named_table = table[1]
cls.main_table = table[0]
print(table[0])
@classmethod
def update_joined_table(cls, **kwargs):
print(kwargs)
cls.joined_table.append(kwargs)
@classmethod
def get_main_table(cls):
return cls.main_table
@classmethod
def get_main_named_table(cls):
return cls.main_named_table
@classmethod
def get_joined_table(cls):
return cls.joined_table
class Select:
def __init__(self, query):
query = re.split(r'from', query)
target = list_whitespace_remove(query)
table_name = target[1].split()
columns = target[0].split(', ')
value_columns = ''
Table.update_main_table(table_name)
main_table_named = Table.get_main_named_table()
for item in columns:
if main_table_named in item:
if '*' in target[0]:
value_columns += ''
else:
value_columns += f'"{item}", '
self._orm = f'{Table.get_main_table()}.objects.values({value_columns})'
def get_orm(self):
return self._orm
class Join:
@classmethod
def inner_join(cls, **kwargs):
query = kwargs['query'].split()
Table.update_joined_table(table=query[0], named=query[1])
print(Table.get_joined_table())
@classmethod
def left_outer_join(cls, **kwargs):
print(kwargs)
def get_orm(self):
return self._orm
class OrderBy:
def __init__(self, query):
order_by_query = query.split()
len_order_by_query = len(order_by_query)
column = order_by_query[0]
main_table_named = Table.get_main_named_table()
if main_table_named:
# when a table alias is present, strip the "<alias>." prefix from the column
if main_table_named in column:
column = column.split('.')[1]
# To sort the records in descending order
sort = ''
if len_order_by_query > 1:
if order_by_query[1].lower() == 'desc':
sort = '-'
self._orm = f'.order_by("{sort}{column}")'
def get_orm(self):
return self._orm
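# Small usage sketch (assuming no table alias has been registered on Table, so
# get_main_named_table() still returns its empty default):
#   OrderBy('created_at desc').get_orm()  -> '.order_by("-created_at")'
#   OrderBy('created_at').get_orm()       -> '.order_by("created_at")'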
class Where:
def __init__(self, query):
and_re = re.split(r'and', query, flags=re.I)  # re.I must be passed as flags, not as maxsplit
print(and_re)
if 'and' in query:
pass
if 'or' in query:
pass
self._orm = f'.filter({query})'
def get_orm(self):
return self._orm
class QueryFuncManager:
_queryMappingTable = {
"select": Select,
"where": Where,
"inner join": Join.inner_join,
"left outer join": Join,
"order by": OrderBy,
}
@classmethod
def get_query(cls, contentType, *args, **kwargs):
try:
query_func = cls._queryMappingTable[contentType]  # index so an unknown clause raises KeyError and hits the handler below
return query_func(*args, **kwargs)
except KeyError:
raise Exception(f"{contentType} is invalid content type")
| 25.638462
| 79
| 0.582658
|
279d44ff0c86e62b60408df23733f20821a01e47
| 2,527
|
py
|
Python
|
app/main/forms.py
|
tianmaxingkonggrant/tianmaflaskblog
|
aa7e0bcdec4207455456858783f89acec8e3c34e
|
[
"MIT"
] | 1
|
2016-07-10T02:36:58.000Z
|
2016-07-10T02:36:58.000Z
|
app/main/forms.py
|
tianmaxingkonggrant/tianmaflaskblog
|
aa7e0bcdec4207455456858783f89acec8e3c34e
|
[
"MIT"
] | 10
|
2020-03-24T15:35:38.000Z
|
2022-03-11T23:16:34.000Z
|
app/main/forms.py
|
tianmaxingkonggrant/tianmaflaskblog
|
aa7e0bcdec4207455456858783f89acec8e3c34e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, TextAreaField, \
BooleanField, SelectField, FileField
from wtforms.validators import DataRequired, Length, Email, Regexp
from ..models import Role, User
from wtforms import ValidationError
from flask.ext.pagedown.fields import PageDownField
class EditProfileForm(Form):
username = StringField('用户名',validators=[DataRequired(message='用户名不能为空'),
Length(1,64),
Regexp(ur"^([0-9A-Za-z]|[\u4e00-\u9fa5])+([0-9A-Za-z]|[\u4e00-\u9fa5])*$", 0, message='用户名不能是特殊字符,比如@¥%.')])
name =StringField('姓名:', validators=[Length(0,64)])
location = StringField('住址:', validators=[Length(0, 64)])
about_me = TextAreaField('关于我:', validators=[Length(0, 40, message='简介不得超过35个字.')])
submit = SubmitField('保存资料')
class EditProfileAdminForm(Form):
email = StringField('邮箱', validators=[Email(message='邮箱格式不正确'),DataRequired(message='请填写邮箱'),Length(1,64)])
username = StringField('用户名',validators=[DataRequired(message='用户名不能为空'),
Length(1,64),Regexp(ur"^([0-9A-Za-z]|[\u4e00-\u9fa5])+([0-9A-Za-z]|[\u4e00-\u9fa5])*$", 0, message='用户名不能是特殊字符,比如@¥%.')])
confirmed = BooleanField('确认')
role = SelectField('角色',coerce=int)
name =StringField('姓名:', validators=[Length(0,64)])
location = StringField('住址:', validators=[Length(0, 64)])
about_me = TextAreaField('关于我:', validators=[Length(0, 40, message='简介不得超过35个字.')])
submit = SubmitField('保存资料')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name) for role in Role.\
query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and User.query.filter_by(email=field.data)\
.first():
raise ValidationError('该邮箱已注册.')
def validate_username(self, field):
if field.data != self.user.username and User.query.filter_by(username=field.\
data).first():
raise ValidationError('该用户名已注册.')
class PostForm(Form):
title = StringField('', validators=[DataRequired(message='请填写标题'),Length(1,128)])
body = TextAreaField('', validators=[DataRequired(message='请填写内容')])
submit = SubmitField('发表')
class CommentForm(Form):
body = StringField('', validators=[DataRequired(message='请填写评论内容')])
submit = SubmitField('评论')
class ImgForm(Form):
picture = FileField('上传照片')
submit = SubmitField('保存')
class SearchForm(Form):
search = StringField('', validators=[DataRequired()])
submit=SubmitField('搜索')
| 37.161765
| 123
| 0.708746
|
860c864190bbd41515d20b173bbed34d5c81ef1d
| 569
|
py
|
Python
|
src/model_utils.py
|
niteya-shah/Text-Recognition-using-GRU
|
97618cf4c439f659c4744336cda0d30bf3e6c045
|
[
"MIT"
] | null | null | null |
src/model_utils.py
|
niteya-shah/Text-Recognition-using-GRU
|
97618cf4c439f659c4744336cda0d30bf3e6c045
|
[
"MIT"
] | null | null | null |
src/model_utils.py
|
niteya-shah/Text-Recognition-using-GRU
|
97618cf4c439f659c4744336cda0d30bf3e6c045
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
def ctc_loss_func(args):
y_pred, labels, input_length, label_length = args
y_pred = y_pred[:, 2:, :]
input_length = input_length - 2
return tf.keras.backend.ctc_batch_cost(labels, y_pred,
input_length, label_length)
def ctc_decode_func(args):
y_pred, seq_len = args
y_pred, _ = tf.keras.backend.ctc_decode(y_pred, tf.squeeze(seq_len))
return y_pred[0]
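# A minimal wiring sketch (not part of the original module): ctc_loss_func is
# typically attached through a Lambda layer so Keras treats the CTC loss as a
# model output; all four arguments are assumed to be Keras tensors of the
# usual shapes (softmax output, label ids, and the two length vectors).
def example_ctc_wiring(y_pred, labels, input_length, label_length):
    return tf.keras.layers.Lambda(ctc_loss_func, output_shape=(1,), name='ctc')(
        [y_pred, labels, input_length, label_length])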
def norm_func(i):
return np.subtract(np.array(i), np.mean(np.array(i)))/np.var(np.array(i))
| 27.095238
| 77
| 0.653779
|
fac2837a1746c9c28b4ccc89b3baa06cabdb7f6a
| 11,150
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200301/get_virtual_network.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200301/get_virtual_network.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200301/get_virtual_network.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkResult',
'AwaitableGetVirtualNetworkResult',
'get_virtual_network',
]
@pulumi.output_type
class GetVirtualNetworkResult:
"""
Virtual Network resource.
"""
def __init__(__self__, address_space=None, bgp_communities=None, ddos_protection_plan=None, dhcp_options=None, enable_ddos_protection=None, enable_vm_protection=None, etag=None, id=None, ip_allocations=None, location=None, name=None, provisioning_state=None, resource_guid=None, subnets=None, tags=None, type=None, virtual_network_peerings=None):
if address_space and not isinstance(address_space, dict):
raise TypeError("Expected argument 'address_space' to be a dict")
pulumi.set(__self__, "address_space", address_space)
if bgp_communities and not isinstance(bgp_communities, dict):
raise TypeError("Expected argument 'bgp_communities' to be a dict")
pulumi.set(__self__, "bgp_communities", bgp_communities)
if ddos_protection_plan and not isinstance(ddos_protection_plan, dict):
raise TypeError("Expected argument 'ddos_protection_plan' to be a dict")
pulumi.set(__self__, "ddos_protection_plan", ddos_protection_plan)
if dhcp_options and not isinstance(dhcp_options, dict):
raise TypeError("Expected argument 'dhcp_options' to be a dict")
pulumi.set(__self__, "dhcp_options", dhcp_options)
if enable_ddos_protection and not isinstance(enable_ddos_protection, bool):
raise TypeError("Expected argument 'enable_ddos_protection' to be a bool")
pulumi.set(__self__, "enable_ddos_protection", enable_ddos_protection)
if enable_vm_protection and not isinstance(enable_vm_protection, bool):
raise TypeError("Expected argument 'enable_vm_protection' to be a bool")
pulumi.set(__self__, "enable_vm_protection", enable_vm_protection)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_allocations and not isinstance(ip_allocations, list):
raise TypeError("Expected argument 'ip_allocations' to be a list")
pulumi.set(__self__, "ip_allocations", ip_allocations)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if subnets and not isinstance(subnets, list):
raise TypeError("Expected argument 'subnets' to be a list")
pulumi.set(__self__, "subnets", subnets)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_peerings and not isinstance(virtual_network_peerings, list):
raise TypeError("Expected argument 'virtual_network_peerings' to be a list")
pulumi.set(__self__, "virtual_network_peerings", virtual_network_peerings)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> Optional['outputs.AddressSpaceResponse']:
"""
The AddressSpace that contains an array of IP address ranges that can be used by subnets.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpCommunities")
def bgp_communities(self) -> Optional['outputs.VirtualNetworkBgpCommunitiesResponse']:
"""
Bgp Communities sent over ExpressRoute with each route corresponding to a prefix in this VNET.
"""
return pulumi.get(self, "bgp_communities")
@property
@pulumi.getter(name="ddosProtectionPlan")
def ddos_protection_plan(self) -> Optional['outputs.SubResourceResponse']:
"""
The DDoS protection plan associated with the virtual network.
"""
return pulumi.get(self, "ddos_protection_plan")
@property
@pulumi.getter(name="dhcpOptions")
def dhcp_options(self) -> Optional['outputs.DhcpOptionsResponse']:
"""
The dhcpOptions that contains an array of DNS servers available to VMs deployed in the virtual network.
"""
return pulumi.get(self, "dhcp_options")
@property
@pulumi.getter(name="enableDdosProtection")
def enable_ddos_protection(self) -> Optional[bool]:
"""
Indicates if DDoS protection is enabled for all the protected resources in the virtual network. It requires a DDoS protection plan associated with the resource.
"""
return pulumi.get(self, "enable_ddos_protection")
@property
@pulumi.getter(name="enableVmProtection")
def enable_vm_protection(self) -> Optional[bool]:
"""
Indicates if VM protection is enabled for all the subnets in the virtual network.
"""
return pulumi.get(self, "enable_vm_protection")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAllocations")
def ip_allocations(self) -> Optional[Sequence['outputs.SubResourceResponse']]:
"""
Array of IpAllocation which reference this VNET.
"""
return pulumi.get(self, "ip_allocations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the virtual network resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resourceGuid property of the Virtual Network resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def subnets(self) -> Optional[Sequence['outputs.SubnetResponse']]:
"""
A list of subnets in a Virtual Network.
"""
return pulumi.get(self, "subnets")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkPeerings")
def virtual_network_peerings(self) -> Optional[Sequence['outputs.VirtualNetworkPeeringResponse']]:
"""
A list of peerings in a Virtual Network.
"""
return pulumi.get(self, "virtual_network_peerings")
class AwaitableGetVirtualNetworkResult(GetVirtualNetworkResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkResult(
address_space=self.address_space,
bgp_communities=self.bgp_communities,
ddos_protection_plan=self.ddos_protection_plan,
dhcp_options=self.dhcp_options,
enable_ddos_protection=self.enable_ddos_protection,
enable_vm_protection=self.enable_vm_protection,
etag=self.etag,
id=self.id,
ip_allocations=self.ip_allocations,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
subnets=self.subnets,
tags=self.tags,
type=self.type,
virtual_network_peerings=self.virtual_network_peerings)
def get_virtual_network(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkResult:
"""
Virtual Network resource.
:param str expand: Expands referenced resources.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_name: The name of the virtual network.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkName'] = virtual_network_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200301:getVirtualNetwork', __args__, opts=opts, typ=GetVirtualNetworkResult).value
return AwaitableGetVirtualNetworkResult(
address_space=__ret__.address_space,
bgp_communities=__ret__.bgp_communities,
ddos_protection_plan=__ret__.ddos_protection_plan,
dhcp_options=__ret__.dhcp_options,
enable_ddos_protection=__ret__.enable_ddos_protection,
enable_vm_protection=__ret__.enable_vm_protection,
etag=__ret__.etag,
id=__ret__.id,
ip_allocations=__ret__.ip_allocations,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
subnets=__ret__.subnets,
tags=__ret__.tags,
type=__ret__.type,
virtual_network_peerings=__ret__.virtual_network_peerings)
| 40.107914
| 350
| 0.66852
|
1070c0d9bb8abba0b74bc476303e820110a387eb
| 959
|
py
|
Python
|
main.py
|
jobinrjohnson/QSim
|
fc2b45229acb82c87b6f50b567611d8d8abd4dd4
|
[
"Apache-2.0"
] | 4
|
2019-05-03T18:58:37.000Z
|
2022-03-05T20:22:06.000Z
|
main.py
|
jobinrjohnson/QSim
|
fc2b45229acb82c87b6f50b567611d8d8abd4dd4
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
jobinrjohnson/QSim
|
fc2b45229acb82c87b6f50b567611d8d8abd4dd4
|
[
"Apache-2.0"
] | 4
|
2019-05-06T09:04:35.000Z
|
2021-11-12T08:04:28.000Z
|
from flask import Flask, jsonify, request, render_template, make_response
import time
from subprocess import Popen, PIPE
import os
app = Flask(__name__, static_folder="web", template_folder="web")
def serve_homepage():
return render_template("main.html")
def process_post():
code = request.form['code']
file_name = "temp/" + str(time.time()) + ".txt"
file1 = open(file_name, "w")
file1.write(code)
file1.close()
process = Popen(["build/QSim", file_name], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
return output
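# Example request against the dev server started below (assumes the default
# Flask port 5000 and that build/QSim has been compiled next to this script);
# the simulator's stdout is returned verbatim as the response body:
#
#   curl -X POST --data-urlencode 'code=<QSim source here>' http://localhost:5000/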
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
return process_post()
else:
return serve_homepage()
@app.route('/sw.js')
def sw_js():
headers = {'Content-Type': 'application/javascript'}
return make_response(render_template('sw.js'),200,headers)
if __name__ == '__main__':
app.run()
| 21.795455
| 73
| 0.656934
|
7523a15cd7a28d9810304a82c245e0156af6ac1c
| 2,729
|
py
|
Python
|
ex2/ex2/Regularized-logistic-regression.py
|
LixiangHan/cs229-assignments
|
3ed75e9c95b60e8c58a86c5adce3bd7cd4ea74a0
|
[
"MIT"
] | null | null | null |
ex2/ex2/Regularized-logistic-regression.py
|
LixiangHan/cs229-assignments
|
3ed75e9c95b60e8c58a86c5adce3bd7cd4ea74a0
|
[
"MIT"
] | null | null | null |
ex2/ex2/Regularized-logistic-regression.py
|
LixiangHan/cs229-assignments
|
3ed75e9c95b60e8c58a86c5adce3bd7cd4ea74a0
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optimize
def feature_map(X):
m = len(X)
x_1 = X[:, 0][np.newaxis].T
x_2 = X[:, 1][np.newaxis].T
X = np.concatenate(
(
np.ones((m, 1)),
x_1,
x_2,
x_1**2,
x_1 * x_2,
x_2**2,
x_1**3,
x_1**2 * x_2,
x_1 * x_2**2,
x_2**3,
x_1**4,
x_1**3 * x_2,
x_1**2 * x_2**2,
x_1**1 * x_2**3,
x_2**4,
x_1**5,
x_1**4 * x_2,
x_1**3 * x_2**2,
x_1**2 * x_2**3,
x_1**1 * x_2**4,
x_2**5,
x_1**6,
x_1**5 * x_2,
x_1**4 * x_2**2,
x_1**3 * x_2**3,
x_1**2 * x_2**4,
x_1 * x_2**5,
x_2**6
),axis=1)
return X
def sigmoid(z):
a = 1 + np.exp(-z)
return 1 / a
def cost_function(theta, X, y, _lambda):
m = len(X)
h_theta = sigmoid(np.dot(X, theta[np.newaxis].T))
a = - np.dot(y.T, np.log(h_theta))
b = - np.dot((1 - y).T, np.log(1 - h_theta))
c = a + b
d = _lambda * np.dot(theta[1:], theta[1:].T) / m / 2 # only if j >= 1
J_val = c / m + d
return J_val[0][0]
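# For reference, the cost above is the standard L2-regularised logistic
# regression objective (theta_0 is deliberately excluded from the penalty):
#
#   J(theta) = -(1/m) * sum_i [ y_i*log(h_theta(x_i)) + (1 - y_i)*log(1 - h_theta(x_i)) ]
#              + (lambda/(2m)) * sum_{j>=1} theta_j^2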
def gradient(theta, X, y, _lambda):
m = len(X)
h_theta = sigmoid(np.dot(X, theta[np.newaxis].T))
# Calculate gradient descent
a = h_theta - y
b = _lambda * theta / m
gradience = (np.dot(X.T, a) / m).ravel()
gradience[1:] = gradience[1:] + b[1:] # regularise theta[j] only for j >= 1 (theta[0] is excluded)
return gradience
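# Corresponding regularised gradient (theta_0 again unpenalised), matching the
# vectorised computation above:
#
#   dJ/dtheta_0 = (1/m) * sum_i (h_theta(x_i) - y_i) * x_i0
#   dJ/dtheta_j = (1/m) * sum_i (h_theta(x_i) - y_i) * x_ij + (lambda/m) * theta_j,  j >= 1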
def predict(x, theta):
'''
x = array(m, n)
theta = array(n,)
'''
pred = np.zeros((len(x), 1))
a = sigmoid(np.dot(x, theta[np.newaxis].T))
pred[sigmoid(np.dot(x, theta[np.newaxis].T)) >= .5] = 1
return pred
def main():
data = np.loadtxt('./ex2/ex2/ex2data2.txt', delimiter=',')
X = data[:, :-1]
y = data[:, -1][np.newaxis].T
X = feature_map(X)
theta = np.zeros((28,), dtype=np.float)
_lambda = 0.1
res = optimize.minimize(cost_function, theta, (X, y, _lambda), 'BFGS', gradient)
theta = res.x
# Plot decision boundary
_x, _y = np.meshgrid(np.arange(-1, 1.5, 0.01), np.arange(-1, 1.5, 0.01))
_X = np.c_[_x.reshape(-1, 1), _y.reshape(-1, 1)]
_X = feature_map(_X)
z = predict(_X, theta)
z = z.reshape(_x.shape)
plt.contour(_x, _y, z, cmap=plt.cm.Spectral)
# Plot data
plt.scatter(X[:, 1], X[:, 2], c = y.ravel(), cmap=plt.cm.Spectral)
plt.xlabel('Microchip test 1')
plt.ylabel('Microchip test 2')
plt.show()
if __name__ == '__main__':
main()
| 23.324786
| 84
| 0.474166
|
d08d6ce921c51ec1611aba399b9391570e04f47a
| 7,624
|
py
|
Python
|
nws.py
|
drsjb80/conky
|
314813c26ae294db7b3d02cb7350e47486d0a979
|
[
"MIT"
] | null | null | null |
nws.py
|
drsjb80/conky
|
314813c26ae294db7b3d02cb7350e47486d0a979
|
[
"MIT"
] | null | null | null |
nws.py
|
drsjb80/conky
|
314813c26ae294db7b3d02cb7350e47486d0a979
|
[
"MIT"
] | null | null | null |
# vim: ts=4 sw=4 expandtab
import io
import re
import sys
import os.path
import urllib.request
import xml.etree.ElementTree
LAYOUT_KEY = 'k-p12h-n14-1'
def print_forecast(zipped):
''' Print out all the forecast information in conky format. '''
count = 0
for i in zipped:
offset = 100 if count % 2 == 1 else 0
row = int(count/2)
print("${{image {} -p {},{}}}".format(get_icon(i[0]), offset, \
120 + row * 100, end=''))
print('${{goto 200}}{}: {}'.format(i[1].attrib['period-name'], \
i[2].attrib["weather-summary"]), end='')
if count != 0 and count % 2 == 1:
print()
print('${{goto 200}}{}/{}'.format(save, i[3].text if i[3] is not None else 'NA'))
print('${voffset 9}')
else:
save = i[3].text
count += 1
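# Illustrative fragment of the conky markup emitted above (values invented):
#   ${image /tmp/few.png -p 0,120}${goto 200}Tonight: Partly Cloudy
#   ${goto 200}54/38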
def get_icon(url):
''' Retrieve an icon and return the file name where it was saved. '''
filename = '/tmp/' + re.sub(r'.*/', '', url.text)
if not os.path.isfile(filename):
try:
urllib.request.urlretrieve(url.text, filename)
except ValueError as ve:
print(ve)
print(url)
print(filename)
return filename
class Forecast:
''' A class to collect and display the NWS forecast information. '''
def __init__(self, forecast):
self.forecast = forecast
self.forecast_parameters = self.forecast.find('./parameters')
def get_forecast_location(self):
''' Return where the forecast is actually for. '''
return self.forecast.find('./location').find('./point') or 'NA'
def get_forecast_maximum_temperatures(self):
''' Return a list of maximum forecasted temperatures. '''
return self.forecast_parameters.findall('./temperature[@type="maximum"]/value')
def get_forecast_minimum_temperatures(self):
''' Return a list of minimum forecasted temperatures. '''
return self.forecast_parameters.findall('./temperature[@type="minimum"]/value')
def get_forecast_weather(self):
''' Return a list of words for forecasts. '''
return self.forecast_parameters.findall('./weather[@time-layout="' + \
LAYOUT_KEY + '"]/weather-conditions')
def get_forecast_conditions_icon(self):
''' Return a list of icons for forecasts. '''
return self.forecast_parameters.findall('./conditions-icon/icon-link')
def get_worded_forecast(self):
''' Return a list of long words for forecasts. '''
return self.forecast_parameters.findall('./wordedForecast/text')
def get_forecast_days(self):
''' Return a list of days that the forecasts refer to. '''
return self.forecast.findall('./time-layout/layout-key[.="' + \
LAYOUT_KEY + '"]/../start-valid-time')
def get_more_weather_information(self):
''' Return a link to where to get more forecast weather information. '''
return self.forecast.find('./moreWeatherInformation') or 'NA'
def get_forecast(self):
''' Gather all the forecast information and print it out. '''
# get_forecast_location()
icons = self.get_forecast_conditions_icon()
days = self.get_forecast_days()
words = self.get_forecast_weather()
maxs = self.get_forecast_maximum_temperatures()
mins = self.get_forecast_minimum_temperatures()
maxs_and_mins = [j for i in zip(maxs, mins) for j in i]
if not (len(icons) == len(days) == len(words)):
print('Error: inconsistent lengths')
zipped = zip(icons, days, words, maxs_and_mins)
print_forecast(zipped)
class Current:
''' A class to collect and display the NWS current information. '''
def __init__(self, current):
self.current = current
self.current_parameters = self.current.find('./parameters')
def get_current_location(self):
''' Return information about the location requested. '''
return self.current_parameters.find('./location') or 'NA'
def get_current_temperature(self):
''' Return the current temperature. '''
return self.current_parameters.find \
('./temperature[@type="apparent"]/value').text or 'NA'
def get_current_humidity(self):
''' Return the current relative humidity. '''
return self.current_parameters.find('./humidity/value').text or 'NA'
def get_current_weather(self):
''' Return the short words for the current condition. '''
return self.current_parameters.find \
('./weather/weather-conditions').attrib["weather-summary"] or 'NA'
def get_current_weather_icon(self):
''' Return the file name for the current icon. '''
current_icon_link = self.current_parameters.find('./conditions-icon/icon-link')
if current_icon_link is None:
print('Error: icon not found')
return None
return get_icon(current_icon_link)
def get_current_wind_speed(self):
''' Return the current wind speed. '''
return self.current_parameters.find('./wind-speed[@type="sustained"]/value').text or 'NA'
def get_current_wind_direction(self):
''' Return the current wind direction in degrees. '''
return self.current_parameters.find('./direction/value').text or 'NA'
def get_current_pressure(self):
''' Return the current barometric pressure. '''
return self.current_parameters.find('./pressure/value').text or 'NA'
def get_current(self):
''' Gather all the current information and print it out in conky format. '''
print(self.get_current_weather(), end='')
print('${{goto 300}}Wind speed:${{goto 410}}{}'.format(self.get_current_wind_speed()))
print('Temp:${{goto 100}}{}'.format(self.get_current_temperature()), end='')
print('${{goto 300}}Direction:${{goto 410}}{}'.format(self.get_current_wind_direction()))
print('RH:${{goto 100}}{}'.format(self.get_current_humidity()), end='')
print('${{goto 300}}Pressure:${{goto 410}}{}'.format(self.get_current_pressure()))
filename = self.get_current_weather_icon()
print("${{image {} -p 200,0}}".format(filename))
# https://stackoverflow.com/questions/59067649/assert-true-vs-assert-is-not-none
'''
with open('~/src/conky/weather.gov') as response:
html = response.read()
tree = xml.etree.ElementTree.parse(io.StringIO(html))
'''
# print('getting forecast', file=sys.stderr)
SOURCE_URL = 'https://forecast.weather.gov/MapClick.php?lat=' + sys.argv[1] + \
'&lon=' + sys.argv[2] + '&unit=0&lg=english&FcstType=dwml'
try:
with urllib.request.urlopen(SOURCE_URL) as response:
HTML = response.read()
TREE = xml.etree.ElementTree.parse(io.BytesIO(HTML))
ROOT = TREE.getroot()
FORECAST = ROOT.find('./data[@type="forecast"]')
if FORECAST is None:
print('No forecast found')
FOURTEEN = FORECAST.find('./time-layout/layout-key[.="' + LAYOUT_KEY + '"]')
if FOURTEEN is None:
LAYOUT_KEY = 'k-p12h-n13-1'
THIRTEEN = FORECAST.find('./time-layout/layout-key[.="' + LAYOUT_KEY + '"]')
if THIRTEEN is None:
print('No time layout found')
sys.exit(1)
CURRENT = ROOT.find('./data[@type="current observations"]')
if CURRENT is None:
print('Error: no current conditions found')
else:
Current(CURRENT).get_current()
Forecast(FORECAST).get_forecast()
except Exception:
print('Failed to fetch: ' + SOURCE_URL)
| 38.311558
| 97
| 0.625787
|
10bdbdca822a0a8e63f46b1702be606ccf616db9
| 16,572
|
py
|
Python
|
cynetworkx/classes/tests/historical_tests.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 12
|
2019-07-23T08:07:53.000Z
|
2022-03-09T06:13:16.000Z
|
cynetworkx/classes/tests/historical_tests.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 7
|
2019-08-30T07:00:00.000Z
|
2021-12-30T08:02:56.000Z
|
cynetworkx/classes/tests/historical_tests.py
|
Viech/cynetworkx
|
01a37859c67b752392e9e783c949084964eef2cf
|
[
"BSD-3-Clause"
] | 5
|
2020-10-10T03:40:32.000Z
|
2021-11-23T12:28:53.000Z
|
#!/usr/bin/env python
"""Original NetworkX graph tests"""
from nose.tools import *
import cynetworkx as nx
from cynetworkx import convert_node_labels_to_integers as cnlti
from cynetworkx.testing import *
class HistoricalTests(object):
def setUp(self):
self.null = nx.null_graph()
self.P1 = cnlti(nx.path_graph(1), first_label=1)
self.P3 = cnlti(nx.path_graph(3), first_label=1)
self.P10 = cnlti(nx.path_graph(10), first_label=1)
self.K1 = cnlti(nx.complete_graph(1), first_label=1)
self.K3 = cnlti(nx.complete_graph(3), first_label=1)
self.K4 = cnlti(nx.complete_graph(4), first_label=1)
self.K5 = cnlti(nx.complete_graph(5), first_label=1)
self.K10 = cnlti(nx.complete_graph(10), first_label=1)
self.G = nx.Graph
def test_name(self):
G = self.G(name="test")
assert_equal(str(G), 'test')
assert_equal(G.name, 'test')
H = self.G()
assert_equal(H.name, '')
# Nodes
def test_add_remove_node(self):
G = self.G()
G.add_node('A')
assert_true(G.has_node('A'))
G.remove_node('A')
assert_false(G.has_node('A'))
def test_nonhashable_node(self):
# Test if a non-hashable object is in the Graph. A python dict will
# raise a TypeError, but for a Graph class a simple False should be
# returned (see Graph __contains__). If it cannot be a node then it is
# not a node.
G = self.G()
assert_false(G.has_node(['A']))
assert_false(G.has_node({'A': 1}))
def test_add_nodes_from(self):
G = self.G()
G.add_nodes_from(list("ABCDEFGHIJKL"))
assert_true(G.has_node("L"))
G.remove_nodes_from(['H', 'I', 'J', 'K', 'L'])
G.add_nodes_from([1, 2, 3, 4])
assert_equal(sorted(G.nodes(), key=str),
[1, 2, 3, 4, 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
# test __iter__
assert_equal(sorted(G, key=str),
[1, 2, 3, 4, 'A', 'B', 'C', 'D', 'E', 'F', 'G'])
def test_contains(self):
G = self.G()
G.add_node('A')
assert_true('A' in G)
assert_false([] in G) # never raise a Key or TypeError in this test
assert_false({1: 1} in G)
def test_add_remove(self):
# Test add_node and remove_node acting for various nbunch
G = self.G()
G.add_node('m')
assert_true(G.has_node('m'))
G.add_node('m') # no complaints
assert_raises(nx.NetworkXError, G.remove_node, 'j')
G.remove_node('m')
assert_equal(list(G), [])
def test_nbunch_is_list(self):
G = self.G()
G.add_nodes_from(list("ABCD"))
G.add_nodes_from(self.P3) # add nbunch of nodes (nbunch=Graph)
assert_equal(sorted(G.nodes(), key=str),
[1, 2, 3, 'A', 'B', 'C', 'D'])
G.remove_nodes_from(self.P3) # remove nbunch of nodes (nbunch=Graph)
assert_equal(sorted(G.nodes(), key=str),
['A', 'B', 'C', 'D'])
def test_nbunch_is_set(self):
G = self.G()
nbunch = set("ABCDEFGHIJKL")
G.add_nodes_from(nbunch)
assert_true(G.has_node("L"))
def test_nbunch_dict(self):
# nbunch is a dict with nodes as keys
G = self.G()
nbunch = set("ABCDEFGHIJKL")
G.add_nodes_from(nbunch)
nbunch = {'I': "foo", 'J': 2, 'K': True, 'L': "spam"}
G.remove_nodes_from(nbunch)
assert_equal(sorted(G.nodes(), key=str),
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
def test_nbunch_iterator(self):
G = self.G()
G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
n_iter = self.P3.nodes()
G.add_nodes_from(n_iter)
assert_equal(sorted(G.nodes(), key=str),
[1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
n_iter = self.P3.nodes() # rebuild same iterator
G.remove_nodes_from(n_iter) # remove nbunch of nodes (nbunch=iterator)
assert_equal(sorted(G.nodes(), key=str),
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
def test_nbunch_graph(self):
G = self.G()
G.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
nbunch = self.K3
G.add_nodes_from(nbunch)
assert_equal(sorted(G.nodes(), key=str),
[1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
# Edges
def test_add_edge(self):
G = self.G()
assert_raises(TypeError, G.add_edge, 'A')
G.add_edge('A', 'B') # testing add_edge()
G.add_edge('A', 'B') # should fail silently
assert_true(G.has_edge('A', 'B'))
assert_false(G.has_edge('A', 'C'))
assert_true(G.has_edge(*('A', 'B')))
if G.is_directed():
assert_false(G.has_edge('B', 'A'))
else:
# G is undirected, so B->A is an edge
assert_true(G.has_edge('B', 'A'))
G.add_edge('A', 'C') # test directedness
G.add_edge('C', 'A')
G.remove_edge('C', 'A')
if G.is_directed():
assert_true(G.has_edge('A', 'C'))
else:
assert_false(G.has_edge('A', 'C'))
assert_false(G.has_edge('C', 'A'))
def test_self_loop(self):
G = self.G()
G.add_edge('A', 'A') # test self loops
assert_true(G.has_edge('A', 'A'))
G.remove_edge('A', 'A')
G.add_edge('X', 'X')
assert_true(G.has_node('X'))
G.remove_node('X')
G.add_edge('A', 'Z') # should add the node silently
assert_true(G.has_node('Z'))
def test_add_edges_from(self):
G = self.G()
G.add_edges_from([('B', 'C')]) # test add_edges_from()
assert_true(G.has_edge('B', 'C'))
if G.is_directed():
assert_false(G.has_edge('C', 'B'))
else:
assert_true(G.has_edge('C', 'B')) # undirected
G.add_edges_from([('D', 'F'), ('B', 'D')])
assert_true(G.has_edge('D', 'F'))
assert_true(G.has_edge('B', 'D'))
if G.is_directed():
assert_false(G.has_edge('D', 'B'))
else:
assert_true(G.has_edge('D', 'B')) # undirected
def test_add_edges_from2(self):
G = self.G()
# after failing silently, should add 2nd edge
G.add_edges_from([tuple('IJ'), list('KK'), tuple('JK')])
assert_true(G.has_edge(*('I', 'J')))
assert_true(G.has_edge(*('K', 'K')))
assert_true(G.has_edge(*('J', 'K')))
if G.is_directed():
assert_false(G.has_edge(*('K', 'J')))
else:
assert_true(G.has_edge(*('K', 'J')))
def test_add_edges_from3(self):
G = self.G()
G.add_edges_from(zip(list('ACD'), list('CDE')))
assert_true(G.has_edge('D', 'E'))
assert_false(G.has_edge('E', 'C'))
def test_remove_edge(self):
G = self.G()
G.add_nodes_from([1, 2, 3, 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'])
G.add_edges_from(zip(list('MNOP'), list('NOPM')))
assert_true(G.has_edge('O', 'P'))
assert_true(G.has_edge('P', 'M'))
G.remove_node('P') # tests remove_node()'s handling of edges.
assert_false(G.has_edge('P', 'M'))
assert_raises(TypeError, G.remove_edge, 'M')
G.add_edge('N', 'M')
assert_true(G.has_edge('M', 'N'))
G.remove_edge('M', 'N')
assert_false(G.has_edge('M', 'N'))
# self loop fails silently
G.remove_edges_from([list('HI'), list('DF'),
tuple('KK'), tuple('JK')])
assert_false(G.has_edge('H', 'I'))
assert_false(G.has_edge('J', 'K'))
G.remove_edges_from([list('IJ'), list('KK'), list('JK')])
assert_false(G.has_edge('I', 'J'))
G.remove_nodes_from(set('ZEFHIMNO'))
G.add_edge('J', 'K')
def test_edges_nbunch(self):
# Test G.edges(nbunch) with various forms of nbunch
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
# node not in nbunch should be quietly ignored
assert_raises(nx.NetworkXError, G.edges, 6)
assert_equals(list(G.edges('Z')), []) # iterable non-node
# nbunch can be an empty list
assert_equals(list(G.edges([])), [])
if G.is_directed():
elist = [('A', 'B'), ('A', 'C'), ('B', 'D')]
else:
elist = [('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')]
# nbunch can be a list
assert_edges_equal(list(G.edges(['A', 'B'])), elist)
# nbunch can be a set
assert_edges_equal(G.edges(set(['A', 'B'])), elist)
# nbunch can be a graph
G1 = self.G()
G1.add_nodes_from('AB')
assert_edges_equal(G.edges(G1), elist)
# nbunch can be a dict with nodes as keys
ndict = {'A': "thing1", 'B': "thing2"}
assert_edges_equal(G.edges(ndict), elist)
# nbunch can be a single node
assert_edges_equal(list(G.edges('A')), [('A', 'B'), ('A', 'C')])
assert_nodes_equal(sorted(G), ['A', 'B', 'C', 'D'])
# nbunch can be nothing (whole graph)
assert_edges_equal(
list(G.edges()),
[('A', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'B'), ('C', 'D')]
)
def test_degree(self):
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
assert_equal(G.degree('A'), 2)
# degree of single node in iterable container must return dict
assert_equal(list(G.degree(['A'])), [('A', 2)])
assert_equal(sorted(d for n, d in G.degree(['A', 'B'])), [2, 3])
assert_equal(sorted(d for n, d in G.degree()), [2, 2, 3, 3])
def test_degree2(self):
H = self.G()
H.add_edges_from([(1, 24), (1, 2)])
assert_equal(sorted(d for n, d in H.degree([1, 24])), [1, 2])
def test_degree_graph(self):
P3 = nx.path_graph(3)
P5 = nx.path_graph(5)
# silently ignore nodes not in P3
assert_equal(dict(d for n, d in P3.degree(['A', 'B'])), {})
# nbunch can be a graph
assert_equal(sorted(d for n, d in P5.degree(P3)), [1, 2, 2])
# nbunch can be a graph that's way too big
assert_equal(sorted(d for n, d in P3.degree(P5)), [1, 1, 2])
assert_equal(list(P5.degree([])), [])
assert_equal(dict(P5.degree([])), {})
def test_null(self):
null = nx.null_graph()
assert_equal(list(null.degree()), [])
assert_equal(dict(null.degree()), {})
def test_order_size(self):
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
assert_equal(G.order(), 4)
assert_equal(G.size(), 5)
assert_equal(G.number_of_edges(), 5)
assert_equal(G.number_of_edges('A', 'B'), 1)
assert_equal(G.number_of_edges('A', 'D'), 0)
def test_copy(self):
G = self.G()
H = G.copy() # copy
assert_equal(H.adj, G.adj)
assert_equal(H.name, G.name)
assert_not_equal(H, G)
def test_subgraph(self):
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
SG = G.subgraph(['A', 'B', 'D'])
assert_nodes_equal(list(SG), ['A', 'B', 'D'])
assert_edges_equal(list(SG.edges()), [('A', 'B'), ('B', 'D')])
def test_to_directed(self):
G = self.G()
if not G.is_directed():
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
DG = G.to_directed()
assert_not_equal(DG, G) # directed copy or copy
assert_true(DG.is_directed())
assert_equal(DG.name, G.name)
assert_equal(DG.adj, G.adj)
assert_equal(sorted(DG.out_edges(list('AB'))),
[('A', 'B'), ('A', 'C'), ('B', 'A'),
('B', 'C'), ('B', 'D')])
DG.remove_edge('A', 'B')
assert_true(DG.has_edge('B', 'A')) # this removes B-A but not A-B
assert_false(DG.has_edge('A', 'B'))
def test_to_undirected(self):
G = self.G()
if G.is_directed():
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
UG = G.to_undirected() # to_undirected
assert_not_equal(UG, G)
assert_false(UG.is_directed())
assert_true(G.is_directed())
assert_equal(UG.name, G.name)
assert_not_equal(UG.adj, G.adj)
assert_equal(sorted(UG.edges(list('AB'))),
[('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')])
assert_equal(sorted(UG.edges(['A', 'B'])),
[('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D')])
UG.remove_edge('A', 'B')
assert_false(UG.has_edge('B', 'A'))
assert_false(UG.has_edge('A', 'B'))
def test_neighbors(self):
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
G.add_nodes_from('GJK')
assert_equal(sorted(G['A']), ['B', 'C'])
assert_equal(sorted(G.neighbors('A')), ['B', 'C'])
assert_equal(sorted(G.neighbors('A')), ['B', 'C'])
assert_equal(sorted(G.neighbors('G')), [])
assert_raises(nx.NetworkXError, G.neighbors, 'j')
def test_iterators(self):
G = self.G()
G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'D'),
('C', 'B'), ('C', 'D')])
G.add_nodes_from('GJK')
assert_equal(sorted(G.nodes()),
['A', 'B', 'C', 'D', 'G', 'J', 'K'])
assert_edges_equal(G.edges(),
[('A', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'B'), ('C', 'D')])
assert_equal(sorted([v for k, v in G.degree()]),
[0, 0, 0, 2, 2, 3, 3])
assert_equal(sorted(G.degree(), key=str),
[('A', 2), ('B', 3), ('C', 3), ('D', 2),
('G', 0), ('J', 0), ('K', 0)])
assert_equal(sorted(G.neighbors('A')), ['B', 'C'])
assert_raises(nx.NetworkXError, G.neighbors, 'X')
G.clear()
assert_equal(nx.number_of_nodes(G), 0)
assert_equal(nx.number_of_edges(G), 0)
def test_null_subgraph(self):
# Subgraph of a null graph is a null graph
nullgraph = nx.null_graph()
G = nx.null_graph()
H = G.subgraph([])
assert_true(nx.is_isomorphic(H, nullgraph))
def test_empty_subgraph(self):
# Subgraph of an empty graph is an empty graph. test 1
nullgraph = nx.null_graph()
E5 = nx.empty_graph(5)
E10 = nx.empty_graph(10)
H = E10.subgraph([])
assert_true(nx.is_isomorphic(H, nullgraph))
H = E10.subgraph([1, 2, 3, 4, 5])
assert_true(nx.is_isomorphic(H, E5))
def test_complete_subgraph(self):
# Subgraph of a complete graph is a complete graph
K1 = nx.complete_graph(1)
K3 = nx.complete_graph(3)
K5 = nx.complete_graph(5)
H = K5.subgraph([1, 2, 3])
assert_true(nx.is_isomorphic(H, K3))
def test_subgraph_nbunch(self):
nullgraph = nx.null_graph()
K1 = nx.complete_graph(1)
K3 = nx.complete_graph(3)
K5 = nx.complete_graph(5)
# Test G.subgraph(nbunch), where nbunch is a single node
H = K5.subgraph(1)
assert_true(nx.is_isomorphic(H, K1))
# Test G.subgraph(nbunch), where nbunch is a set
H = K5.subgraph(set([1]))
assert_true(nx.is_isomorphic(H, K1))
# Test G.subgraph(nbunch), where nbunch is an iterator
H = K5.subgraph(iter(K3))
assert_true(nx.is_isomorphic(H, K3))
# Test G.subgraph(nbunch), where nbunch is another graph
H = K5.subgraph(K3)
assert_true(nx.is_isomorphic(H, K3))
H = K5.subgraph([9])
assert_true(nx.is_isomorphic(H, nullgraph))
def test_node_tuple_issue(self):
H = self.G()
# Test error handling of tuple as a node
assert_raises(nx.NetworkXError, H.remove_node, (1, 2))
H.remove_nodes_from([(1, 2)]) # no error
assert_raises(nx.NetworkXError, H.neighbors, (1, 2))
| 38.009174
| 88
| 0.505853
|
ed90ccdf6c76b7adada262e2050fafaa2b6b0030
| 5,861
|
py
|
Python
|
examples/fixed_resolution.py
|
regular/pyglet-avbin-optimizations
|
e93c0508a57c92c24cc22dee12be0a58b1fcf975
|
[
"BSD-3-Clause"
] | 2
|
2017-05-10T08:27:22.000Z
|
2019-10-05T14:55:00.000Z
|
examples/fixed_resolution.py
|
regular/pyglet-avbin-optimizations
|
e93c0508a57c92c24cc22dee12be0a58b1fcf975
|
[
"BSD-3-Clause"
] | null | null | null |
examples/fixed_resolution.py
|
regular/pyglet-avbin-optimizations
|
e93c0508a57c92c24cc22dee12be0a58b1fcf975
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Demonstrates one way of fixing the display resolution to a certain
size, but rendering to the full screen.
The method used in this example is:
1. Set the OpenGL viewport to the fixed resolution
2. Render the scene using any OpenGL functions (here, just a polygon)
3. Copy the framebuffer into a texture
4. Reset the OpenGL viewport to the window (full screen) size
5. Blit the texture to the framebuffer
Recent video cards could also render the scene directly to the texture
using EXT_framebuffer_object. (This is not demonstrated in this example).
'''
from pyglet.gl import *
import pyglet
# Create a fullscreen window using the user's desktop resolution. You can
# also use this technique on ordinary resizable windows.
window = pyglet.window.Window(fullscreen=True)
# Use 320x200 fixed resolution to make the effect completely obvious. You
# can change this to a more reasonable value such as 800x600 here.
target_resolution = 320, 200
class FixedResolutionViewport(object):
def __init__(self, window, width, height, filtered=False):
self.window = window
self.width = width
self.height = height
self.texture = pyglet.image.Texture.create(width, height,
rectangle=True)
if not filtered:
# By default the texture will be bilinear filtered when scaled
# up. If requested, turn filtering off. This makes the image
# aliased, but is more suitable for pixel art.
glTexParameteri(self.texture.target,
GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(self.texture.target,
GL_TEXTURE_MIN_FILTER, GL_NEAREST)
def begin(self):
glViewport(0, 0, self.width, self.height)
self.set_fixed_projection()
def end(self):
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
self.texture.blit_into(buffer, 0, 0, 0)
glViewport(0, 0, self.window.width, self.window.height)
self.set_window_projection()
aspect_width = self.window.width / float(self.width)
aspect_height = self.window.height / float(self.height)
if aspect_width > aspect_height:
scale_width = aspect_height * self.width
scale_height = aspect_height * self.height
else:
scale_width = aspect_width * self.width
scale_height = aspect_width * self.height
x = (self.window.width - scale_width) / 2
y = (self.window.height - scale_height) / 2
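# Worked example of the letterboxing maths above (illustrative numbers): for a
# 1920x1080 window and the 320x200 target, aspect_width = 6.0 and
# aspect_height = 5.4, so the smaller factor wins; the scene is blitted at
# 1728x1080 and centred at x = 96, y = 0.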
glClearColor(0, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
glColor3f(1, 1, 1)
self.texture.blit(x, y, width=scale_width, height=scale_height)
def set_fixed_projection(self):
# Override this method if you need to change the projection of the
# fixed resolution viewport.
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.width, 0, self.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
def set_window_projection(self):
# This is the same as the default window projection, reprinted here
# for clarity.
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, self.window.width, 0, self.window.height, -1, 1)
glMatrixMode(GL_MODELVIEW)
target_width, target_height = target_resolution
viewport = FixedResolutionViewport(window,
target_width, target_height, filtered=False)
def draw_scene():
'''Draw the scene, assuming the fixed resolution viewport and projection
have been set up. This just draws the rotated polygon.'''
glClearColor(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
w, h = target_resolution
glTranslatef(w/2, h/2, 0)
glRotatef(rotate, 0, 0, 1)
glColor3f(1, 0, 0)
s = min(w, h) / 3
glRectf(-s, -s, s, s)
rotate = 0
def update(dt):
global rotate
rotate += dt * 20
pyglet.clock.schedule_interval(update, 1/60.)
@window.event
def on_draw():
viewport.begin()
window.clear()
draw_scene()
viewport.end()
pyglet.app.run()
| 37.33121
| 78
| 0.682136
|
cac6b816324519f55a75a1952b17d7bd972b5a51
| 1,676
|
py
|
Python
|
que.py
|
recall704/what_cms
|
8cd6c6cd2d61669d175520ae67afb929113f8034
|
[
"CC0-1.0"
] | 3
|
2016-12-08T02:41:18.000Z
|
2018-12-04T13:58:09.000Z
|
que.py
|
recall704/what_cms
|
8cd6c6cd2d61669d175520ae67afb929113f8034
|
[
"CC0-1.0"
] | null | null | null |
que.py
|
recall704/what_cms
|
8cd6c6cd2d61669d175520ae67afb929113f8034
|
[
"CC0-1.0"
] | 2
|
2016-12-08T02:41:19.000Z
|
2019-10-04T14:23:10.000Z
|
#encoding=utf-8
import threading
import os
import sys
import urllib2
import time
from random import randint
from libs.WhatcmsColor import *
def get_file_list():
file_list = os.listdir(os.getcwd()+os.sep+"dir_has_matched_file_path")
return file_list
def get_url_status_code(url):
req = urllib2.Request(url)
try:
res = urllib2.urlopen(req,timeout=5)
if res.getcode()==200:
return True
else:
return False
except:
return False
class Tester(threading.Thread):
def __init__(self,name,file_path):
threading.Thread.__init__(self)
self.name = name
self.path = file_path
self.thread_stop = False
def run(self):
if not self.thread_stop:
host = "http://www.freebuf.com"
print "handle %s => %s,time = %s\n" %(self.name,self.path,time.ctime())
f = open(os.getcwd()+os.sep+"dir_has_matched_file_path"+os.sep+self.name)
lines = f.readlines()
f.close()
match_count = 0
for ll in lines:
time.sleep(1)
if get_url_status_code(host+ll):
color.cprint(self.name+ll,GREEN)
match_count +=1
else:
color.cprint(ll,YELLOW)
if match_count > 20:
sys.exit()
def stop(self):
self.thread_stop = True
if __name__ == "__main__":
file_list = get_file_list()
for fileItem in file_list:
p = Tester(fileItem,"/test/")
p.start()
| 26.1875
| 86
| 0.536396
|
b94d8c883757056e36b10733893563ceb54f3f60
| 3,080
|
py
|
Python
|
eval_cifar.py
|
P2333/Bag-of-Tricks-for-AT
|
314683adcfe9ea7c7bfbff50007da510b21f56e1
|
[
"Apache-2.0"
] | 192
|
2020-10-01T16:54:02.000Z
|
2022-03-29T14:48:36.000Z
|
eval_cifar.py
|
P2333/Bag-of-Tricks-for-AT
|
314683adcfe9ea7c7bfbff50007da510b21f56e1
|
[
"Apache-2.0"
] | 5
|
2021-01-10T04:25:49.000Z
|
2021-11-06T14:42:36.000Z
|
eval_cifar.py
|
P2333/Bag-of-Tricks-for-AT
|
314683adcfe9ea7c7bfbff50007da510b21f56e1
|
[
"Apache-2.0"
] | 29
|
2020-10-02T08:23:17.000Z
|
2022-02-19T14:24:35.000Z
|
import argparse
import copy
import logging
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from preactresnet import PreActResNet18
from wideresnet import WideResNet
from utils_plus import (upper_limit, lower_limit, std, clamp, get_loaders,
attack_pgd, evaluate_pgd, evaluate_standard, normalize)
from autoattack import AutoAttack
# installing AutoAttack by: pip install git+https://github.com/fra31/auto-attack
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', default=128, type=int)
parser.add_argument('--data-dir', default='../cifar-data', type=str)
parser.add_argument('--epsilon', default=8, type=int)
parser.add_argument('--out-dir', default='train_fgsm_output', type=str, help='Output directory')
parser.add_argument('--seed', default=0, type=int, help='Random seed')
return parser.parse_args()
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger = logging.getLogger(__name__)
logging.basicConfig(
format='[%(asctime)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG,
handlers=[
logging.StreamHandler()
])
logger.info(args)
_, test_loader = get_loaders(args.data_dir, args.batch_size)
best_state_dict = torch.load(os.path.join(args.out_dir, 'model_best.pth'))
# Evaluation
model_test = PreActResNet18().cuda()
# model_test = WideResNet(34, 10, widen_factor=10, dropRate=0.0)
model_test = nn.DataParallel(model_test).cuda()
if 'state_dict' in best_state_dict.keys():
model_test.load_state_dict(best_state_dict['state_dict'])
else:
model_test.load_state_dict(best_state_dict)
model_test.float()
model_test.eval()
### Evaluate clean acc ###
_, test_acc = evaluate_standard(test_loader, model_test)
print('Clean acc: ', test_acc)
### Evaluate PGD (CE loss) acc ###
_, pgd_acc_CE = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=False)
print('PGD-10 (10 restarts, step 2, CE loss) acc: ', pgd_acc_CE)
### Evaluate PGD (CW loss) acc ###
_, pgd_acc_CW = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=True)
print('PGD-10 (10 restarts, step 2, CW loss) acc: ', pgd_acc_CW)
### Evaluate AutoAttack ###
l = [x for (x, y) in test_loader]
x_test = torch.cat(l, 0)
l = [y for (x, y) in test_loader]
y_test = torch.cat(l, 0)
class normalize_model():
def __init__(self, model):
self.model_test = model
def __call__(self, x):
return self.model_test(normalize(x))
new_model = normalize_model(model_test)
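# The wrapper above folds the dataset normalisation into the forward pass, so
# AutoAttack below can operate directly on raw [0, 1] images with the usual
# L-inf budget of 8/255.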
epsilon = 8 / 255.
adversary = AutoAttack(new_model, norm='Linf', eps=epsilon, version='standard')
X_adv = adversary.run_standard_evaluation(x_test, y_test, bs=128)
if __name__ == "__main__":
main()
| 33.11828
| 119
| 0.682143
|
613d7073f4dc3fa65e7f2fd16aa659c2f3301c58
| 1,905
|
py
|
Python
|
docs/conf.py
|
cptpcrd/psutil-extra
|
b14ef19bfbe443aea4009ea49c7a327ed6abd393
|
[
"MIT"
] | 1
|
2020-07-25T19:53:19.000Z
|
2020-07-25T19:53:19.000Z
|
docs/conf.py
|
cptpcrd/psutil-extra
|
b14ef19bfbe443aea4009ea49c7a327ed6abd393
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
cptpcrd/psutil-extra
|
b14ef19bfbe443aea4009ea49c7a327ed6abd393
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import sphinx_rtd_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'psutil-extra'
copyright = '2020, cptpcrd'
author = 'cptpcrd'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.017857
| 79
| 0.665092
|
dd78d36476f32d818a5ca03c8d8fe5307d0bcbe0
| 614
|
py
|
Python
|
pyretri/extract/splitter/splitter_impl/identity.py
|
dongan-beta/PyRetri
|
8756d5d5813a5211b58855373b6c6cd33d7a11f6
|
[
"Apache-2.0"
] | 1,063
|
2020-04-21T12:42:05.000Z
|
2022-03-31T06:32:50.000Z
|
pyretri/extract/splitter/splitter_impl/identity.py
|
dongan-beta/PyRetri
|
8756d5d5813a5211b58855373b6c6cd33d7a11f6
|
[
"Apache-2.0"
] | 39
|
2020-05-07T07:24:19.000Z
|
2022-02-02T23:49:23.000Z
|
pyretri/extract/splitter/splitter_impl/identity.py
|
dongan-beta/PyRetri
|
8756d5d5813a5211b58855373b6c6cd33d7a11f6
|
[
"Apache-2.0"
] | 174
|
2020-04-26T04:33:11.000Z
|
2022-03-17T02:58:45.000Z
|
# -*- coding: utf-8 -*-
import torch
import numpy as np
from ..splitter_base import SplitterBase
from ...registry import SPLITTERS
from typing import Dict, Optional
@SPLITTERS.register
class Identity(SplitterBase):
"""
Directly return feature maps without any operations.
"""
default_hyper_params = dict()
def __init__(self, hps: Optional[Dict] = None):
"""
Args:
hps (dict): default hyper parameters in a dict (keys, values).
"""
super(Identity, self).__init__(hps)
def __call__(self, features: torch.tensor) -> torch.tensor:
return features
| 21.928571
| 74
| 0.648208
|
7fd6e92e42fba61bec9635d40f3d315721816d88
| 1,299
|
py
|
Python
|
djangocms_page_meta/migrations/0009_auto_20171212_0944.py
|
ImaginaryLandscape/djangocms-page-meta
|
79f45ec3863eed30fd457d2425f9e28ec52f6959
|
[
"BSD-3-Clause"
] | 1
|
2021-09-13T17:51:15.000Z
|
2021-09-13T17:51:15.000Z
|
djangocms_page_meta/migrations/0009_auto_20171212_0944.py
|
ImaginaryLandscape/djangocms-page-meta
|
79f45ec3863eed30fd457d2425f9e28ec52f6959
|
[
"BSD-3-Clause"
] | 1
|
2021-12-10T17:11:43.000Z
|
2021-12-10T17:11:43.000Z
|
djangocms_page_meta/migrations/0009_auto_20171212_0944.py
|
ImaginaryLandscape/djangocms-page-meta
|
79f45ec3863eed30fd457d2425f9e28ec52f6959
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_page_meta', '0008_auto_20160609_0754'),
]
operations = [
migrations.AddField(
model_name='pagemeta',
name='description',
field=models.CharField(default='', max_length=400, blank=True),
),
migrations.AddField(
model_name='pagemeta',
name='gplus_description',
field=models.CharField(default='', max_length=400, verbose_name='Google+ Description', blank=True),
),
migrations.AddField(
model_name='pagemeta',
name='keywords',
field=models.CharField(default='', max_length=400, blank=True),
),
migrations.AddField(
model_name='pagemeta',
name='og_description',
field=models.CharField(default='', max_length=400, verbose_name='Facebook Description', blank=True),
),
migrations.AddField(
model_name='pagemeta',
name='twitter_description',
field=models.CharField(default='', max_length=140, verbose_name='Twitter Description', blank=True),
),
]
| 32.475
| 112
| 0.601232
|
4bccd11f68f2d62b61f24b6aade89e264d2d5d8b
| 1,322
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_table01.py
|
eddiechapman/XlsxWriter
|
c636117ab30e64e4b7b824c9105595c42887c2c9
|
[
"BSD-2-Clause-FreeBSD"
] | 2,766
|
2015-01-02T17:36:42.000Z
|
2022-03-31T09:23:30.000Z
|
xlsxwriter/test/comparison/test_chart_table01.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 683
|
2015-01-03T09:55:02.000Z
|
2022-03-31T07:18:15.000Z
|
xlsxwriter/test/comparison/test_chart_table01.py
|
xiaolanmeng86/XlsxWriter
|
6c3ea23a410e8216eab8f5751e5544ffb444b3da
|
[
"BSD-2-Clause-FreeBSD"
] | 636
|
2015-01-05T01:57:08.000Z
|
2022-03-25T18:42:41.000Z
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_table01.xlsx')
def test_create_file(self):
"""Test XlsxWriter chart axis table properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [61355520, 61357056]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_table()
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 24.481481
| 79
| 0.559758
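Outside the comparison-test harness, the same chart-with-data-table setup can be reproduced with XlsxWriter's public API. A minimal sketch, assuming the xlsxwriter package is installed (the output filename is arbitrary):

import xlsxwriter

workbook = xlsxwriter.Workbook('chart_table_demo.xlsx')
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
data = [[1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15]]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_table()                     # show the plotted values as a data table under the category axis
worksheet.insert_chart('E9', chart)
workbook.close()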
|
c8d01eb4225cb0df86c869ccd65334ebc54e2914
| 1,894
|
py
|
Python
|
tests/test_module_attributes.py
|
KOLANICH-libs/python-zstandard
|
53b71dc3f96961564c9c140bf88b0aa118589247
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_module_attributes.py
|
KOLANICH-libs/python-zstandard
|
53b71dc3f96961564c9c140bf88b0aa118589247
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_module_attributes.py
|
KOLANICH-libs/python-zstandard
|
53b71dc3f96961564c9c140bf88b0aa118589247
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import zstandard as zstd
from .common import TestCase
class TestModuleAttributes(TestCase):
def test_version(self):
self.assertEqual(zstd.ZSTD_VERSION, (1, 4, 5))
self.assertEqual(zstd.__version__, "0.15.0.dev0")
def test_constants(self):
self.assertEqual(zstd.MAX_COMPRESSION_LEVEL, 22)
self.assertEqual(zstd.FRAME_HEADER, b"\x28\xb5\x2f\xfd")
def test_hasattr(self):
attrs = (
"CONTENTSIZE_UNKNOWN",
"CONTENTSIZE_ERROR",
"COMPRESSION_RECOMMENDED_INPUT_SIZE",
"COMPRESSION_RECOMMENDED_OUTPUT_SIZE",
"DECOMPRESSION_RECOMMENDED_INPUT_SIZE",
"DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE",
"MAGIC_NUMBER",
"FLUSH_BLOCK",
"FLUSH_FRAME",
"BLOCKSIZELOG_MAX",
"BLOCKSIZE_MAX",
"WINDOWLOG_MIN",
"WINDOWLOG_MAX",
"CHAINLOG_MIN",
"CHAINLOG_MAX",
"HASHLOG_MIN",
"HASHLOG_MAX",
"HASHLOG3_MAX",
"MINMATCH_MIN",
"MINMATCH_MAX",
"SEARCHLOG_MIN",
"SEARCHLOG_MAX",
"SEARCHLENGTH_MIN",
"SEARCHLENGTH_MAX",
"TARGETLENGTH_MIN",
"TARGETLENGTH_MAX",
"LDM_MINMATCH_MIN",
"LDM_MINMATCH_MAX",
"LDM_BUCKETSIZELOG_MAX",
"STRATEGY_FAST",
"STRATEGY_DFAST",
"STRATEGY_GREEDY",
"STRATEGY_LAZY",
"STRATEGY_LAZY2",
"STRATEGY_BTLAZY2",
"STRATEGY_BTOPT",
"STRATEGY_BTULTRA",
"STRATEGY_BTULTRA2",
"DICT_TYPE_AUTO",
"DICT_TYPE_RAWCONTENT",
"DICT_TYPE_FULLDICT",
)
for a in attrs:
self.assertTrue(hasattr(zstd, a), a)
| 29.138462
| 64
| 0.554382
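A quick interactive check of the module attributes the test above asserts, assuming the python-zstandard package is installed (the exact values depend on the installed release; the test pins libzstd 1.4.5):

import zstandard as zstd

print(zstd.ZSTD_VERSION)             # bundled libzstd version tuple
print(zstd.__version__)              # binding version string
print(zstd.MAX_COMPRESSION_LEVEL)    # 22 in the pinned release
print(zstd.FRAME_HEADER)             # zstd magic bytes b"\x28\xb5\x2f\xfd"
print(hasattr(zstd, "FLUSH_FRAME"))  # one of the constants the test enumerates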
|
6376b220356238512bc9e633b4e8cb3244632f14
| 30,258
|
py
|
Python
|
nova/tests/functional/integrated_helpers.py
|
sapcc/nova
|
ad71af7307365d6aabd122e140f56df4db1e6182
|
[
"Apache-2.0"
] | 2
|
2021-10-11T04:56:25.000Z
|
2022-02-16T08:49:29.000Z
|
nova/tests/functional/integrated_helpers.py
|
sapcc/nova
|
ad71af7307365d6aabd122e140f56df4db1e6182
|
[
"Apache-2.0"
] | 132
|
2017-03-27T11:31:52.000Z
|
2022-03-30T08:45:02.000Z
|
nova/tests/functional/integrated_helpers.py
|
sapcc/nova
|
ad71af7307365d6aabd122e140f56df4db1e6182
|
[
"Apache-2.0"
] | 8
|
2017-03-27T07:50:38.000Z
|
2020-02-14T16:55:56.000Z
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides common functionality for integrated unit tests
"""
import random
import string
import time
from oslo_log import log as logging
import nova.conf
from nova import context
from nova.db import api as db
import nova.image.glance
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client as api_client
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_notifier
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.tests import uuidsentinel as uuids
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def generate_random_alphanumeric(length):
"""Creates a random alphanumeric string of specified length."""
return ''.join(random.choice(string.ascii_uppercase + string.digits)
for _x in range(length))
def generate_random_numeric(length):
"""Creates a random numeric string of specified length."""
return ''.join(random.choice(string.digits)
for _x in range(length))
def generate_new_element(items, prefix, numeric=False):
"""Creates a random string with prefix, that is not in 'items' list."""
while True:
if numeric:
candidate = prefix + generate_random_numeric(8)
else:
candidate = prefix + generate_random_alphanumeric(8)
if candidate not in items:
return candidate
LOG.debug("Random collision on %s", candidate)
class _IntegratedTestBase(test.TestCase):
REQUIRES_LOCKING = True
ADMIN_API = False
# Override this in subclasses which use the NeutronFixture. New tests
# should rely on Neutron since nova-network is deprecated. The default
# value of False here is only temporary while we update the existing
# functional tests to use Neutron.
USE_NEUTRON = False
def setUp(self):
super(_IntegratedTestBase, self).setUp()
# TODO(mriedem): Fix the functional tests to work with Neutron.
self.flags(use_neutron=self.USE_NEUTRON)
self.fake_image_service = \
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self))
self.useFixture(nova_fixtures.Database(database='placement'))
placement = self.useFixture(nova_fixtures.PlacementFixture())
self.placement_api = placement.api
self._setup_services()
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
def _setup_compute_service(self):
return self.start_service('compute')
def _setup_scheduler_service(self):
return self.start_service('scheduler')
def _setup_services(self):
# NOTE(danms): Set the global MQ connection to that of our first cell
# for any cells-ignorant code. Normally this is defaulted in the tests
# which will result in us not doing the right thing.
if 'cell1' in self.cell_mappings:
self.flags(transport_url=self.cell_mappings['cell1'].transport_url)
self.conductor = self.start_service('conductor')
self.consoleauth = self.start_service('consoleauth')
if self.USE_NEUTRON:
self.neutron = self.useFixture(nova_fixtures.NeutronFixture(self))
else:
self.network = self.start_service('network',
manager=CONF.network_manager)
self.scheduler = self._setup_scheduler_service()
self.compute = self._setup_compute_service()
self.api_fixture = self.useFixture(
nova_fixtures.OSAPIFixture(self.api_major_version))
# if the class needs to run as admin, make the api endpoint
# the admin, otherwise it's safer to run as non admin user.
if self.ADMIN_API:
self.api = self.api_fixture.admin_api
else:
self.api = self.api_fixture.api
if hasattr(self, 'microversion'):
self.api.microversion = self.microversion
def get_unused_server_name(self):
servers = self.api.get_servers()
server_names = [server['name'] for server in servers]
return generate_new_element(server_names, 'server')
def get_unused_flavor_name_id(self):
flavors = self.api.get_flavors()
flavor_names = list()
flavor_ids = list()
[(flavor_names.append(flavor['name']),
flavor_ids.append(flavor['id']))
for flavor in flavors]
return (generate_new_element(flavor_names, 'flavor'),
int(generate_new_element(flavor_ids, '', True)))
def get_invalid_image(self):
return uuids.fake
def _build_minimal_create_server_request(self, image_uuid=None):
server = {}
# NOTE(takashin): In API version 2.36, image APIs were deprecated.
# In API version 2.36 or greater, self.api.get_images() returns
# a 404 error. In that case, 'image_uuid' should be specified.
server[self._image_ref_parameter] = (image_uuid or
self.api.get_images()[0]['id'])
# Set a valid flavorId
flavor = self.api.get_flavors()[0]
LOG.debug("Using flavor: %s", flavor)
server[self._flavor_ref_parameter] = ('http://fake.server/%s'
% flavor['id'])
# Set a valid server name
server_name = self.get_unused_server_name()
server['name'] = server_name
return server
def _create_flavor_body(self, name, ram, vcpus, disk, ephemeral, id, swap,
rxtx_factor, is_public):
return {
"flavor": {
"name": name,
"ram": ram,
"vcpus": vcpus,
"disk": disk,
"OS-FLV-EXT-DATA:ephemeral": ephemeral,
"id": id,
"swap": swap,
"rxtx_factor": rxtx_factor,
"os-flavor-access:is_public": is_public,
}
}
def _create_flavor(self, memory_mb=2048, vcpu=2, disk=10, ephemeral=10,
swap=0, rxtx_factor=1.0, is_public=True,
extra_spec=None):
flv_name, flv_id = self.get_unused_flavor_name_id()
body = self._create_flavor_body(flv_name, memory_mb, vcpu, disk,
ephemeral, flv_id, swap, rxtx_factor,
is_public)
self.api_fixture.admin_api.post_flavor(body)
if extra_spec is not None:
spec = {"extra_specs": extra_spec}
self.api_fixture.admin_api.post_extra_spec(flv_id, spec)
return flv_id
def _build_server(self, flavor_id, image=None):
server = {}
if image is None:
image = self.api.get_images()[0]
LOG.debug("Image: %s", image)
# We now have a valid imageId
server[self._image_ref_parameter] = image['id']
else:
server[self._image_ref_parameter] = image
# Set a valid flavorId
flavor = self.api.get_flavor(flavor_id)
LOG.debug("Using flavor: %s", flavor)
server[self._flavor_ref_parameter] = ('http://fake.server/%s'
% flavor['id'])
# Set a valid server name
server_name = self.get_unused_server_name()
server['name'] = server_name
return server
def _check_api_endpoint(self, endpoint, expected_middleware):
app = self.api_fixture.app().get((None, '/v2'))
while getattr(app, 'application', False):
for middleware in expected_middleware:
if isinstance(app.application, middleware):
expected_middleware.remove(middleware)
break
app = app.application
self.assertEqual([],
expected_middleware,
("The expected wsgi middlewares %s are not "
"existed") % expected_middleware)
class InstanceHelperMixin(object):
def _wait_for_server_parameter(self, admin_api, server, expected_params,
max_retries=10):
retry_count = 0
while True:
server = admin_api.get_server(server['id'])
if all([server[attr] == expected_params[attr]
for attr in expected_params]):
break
retry_count += 1
if retry_count == max_retries:
self.fail('Wait for state change failed, '
'expected_params=%s, server=%s'
% (expected_params, server))
time.sleep(0.5)
return server
def _wait_for_state_change(self, admin_api, server, expected_status,
max_retries=10):
return self._wait_for_server_parameter(
admin_api, server, {'status': expected_status}, max_retries)
def _build_minimal_create_server_request(self, api, name, image_uuid=None,
flavor_id=None, networks=None,
az=None):
server = {}
# We now have a valid imageId
server['imageRef'] = image_uuid or api.get_images()[0]['id']
if not flavor_id:
# Set a valid flavorId
flavor_id = api.get_flavors()[1]['id']
server['flavorRef'] = ('http://fake.server/%s' % flavor_id)
server['name'] = name
if networks is not None:
server['networks'] = networks
if az is not None:
server['availability_zone'] = az
return server
def _wait_until_deleted(self, server):
initially_in_error = server.get('status') == 'ERROR'
try:
for i in range(40):
server = self.api.get_server(server['id'])
if not initially_in_error and server['status'] == 'ERROR':
self.fail('Server went to error state instead of '
'disappearing.')
time.sleep(0.5)
self.fail('Server failed to delete.')
except api_client.OpenStackApiNotFoundException:
return
def _wait_for_action_fail_completion(
self, server, expected_action, event_name, api=None):
"""Polls instance action events for the given instance, action and
action event name until it finds the action event with an error
result.
"""
if api is None:
api = self.api
completion_event = None
for attempt in range(10):
actions = api.get_instance_actions(server['id'])
# Look for the migrate action.
for action in actions:
if action['action'] == expected_action:
events = (
api.api_get(
'/servers/%s/os-instance-actions/%s' %
(server['id'], action['request_id'])
).body['instanceAction']['events'])
# Look for the action event being in error state.
for event in events:
if (event['event'] == event_name and
event['result'] is not None and
event['result'].lower() == 'error'):
completion_event = event
# Break out of the events loop.
break
if completion_event:
# Break out of the actions loop.
break
# We didn't find the completion event yet, so wait a bit.
time.sleep(0.5)
if completion_event is None:
self.fail('Timed out waiting for %s failure event. Current '
'instance actions: %s' % (event_name, actions))
def _wait_for_migration_status(self, server, expected_statuses):
"""Waits for a migration record with the given statuses to be found
for the given server, else the test fails. The migration record, if
found, is returned.
"""
api = getattr(self, 'admin_api', None)
if api is None:
api = self.api
statuses = [status.lower() for status in expected_statuses]
for attempt in range(10):
migrations = api.api_get('/os-migrations').body['migrations']
for migration in migrations:
if (migration['instance_uuid'] == server['id'] and
migration['status'].lower() in statuses):
return migration
time.sleep(0.5)
self.fail('Timed out waiting for migration with status "%s" for '
'instance: %s' % (expected_statuses, server['id']))
class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin):
"""Base test class for functional tests that check provider usage
and consumer allocations in Placement during various operations.
Subclasses must define a **compute_driver** attribute for the virt driver
to use.
This class sets up standard fixtures and controller services but does not
start any compute services, that is left to the subclass.
"""
microversion = 'latest'
def setUp(self):
self.flags(compute_driver=self.compute_driver)
super(ProviderUsageBaseTestCase, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
placement = self.useFixture(nova_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.api = self.admin_api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.scheduler_service = self.start_service('scheduler')
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.computes = {}
def _start_compute(self, host, cell_name=None):
"""Start a nova compute service on the given host
:param host: the name of the host that will be associated to the
compute service.
:param cell_name: optional name of the cell in which to start the
compute service (defaults to cell1)
:return: the nova compute service object
"""
compute = self.start_service('compute', host=host, cell=cell_name)
self.computes[host] = compute
return compute
def _get_provider_uuid_by_host(self, host):
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _get_provider_usages(self, provider_uuid):
return self.placement_api.get(
'/resource_providers/%s/usages' % provider_uuid).body['usages']
def _get_allocations_by_server_uuid(self, server_uuid):
return self.placement_api.get(
'/allocations/%s' % server_uuid).body['allocations']
def _get_allocations_by_provider_uuid(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/allocations' % rp_uuid).body['allocations']
def _get_all_providers(self):
return self.placement_api.get(
'/resource_providers', version='1.14').body['resource_providers']
def _create_trait(self, trait):
return self.placement_api.put('/traits/%s' % trait, {}, version='1.6')
def _get_provider_traits(self, provider_uuid):
return self.placement_api.get(
'/resource_providers/%s/traits' % provider_uuid,
version='1.6').body['traits']
def _set_provider_traits(self, rp_uuid, traits):
"""This will overwrite any existing traits.
:param rp_uuid: UUID of the resource provider to update
:param traits: list of trait strings to set on the provider
:returns: APIResponse object with the results
"""
provider = self.placement_api.get(
'/resource_providers/%s' % rp_uuid).body
put_traits_req = {
'resource_provider_generation': provider['generation'],
'traits': traits
}
return self.placement_api.put(
'/resource_providers/%s/traits' % rp_uuid,
put_traits_req, version='1.6')
def _get_all_resource_classes(self):
dicts = self.placement_api.get(
'/resource_classes', version='1.2').body['resource_classes']
return [d['name'] for d in dicts]
def _get_all_traits(self):
return self.placement_api.get('/traits', version='1.6').body['traits']
def _get_provider_inventory(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/inventories' % rp_uuid).body['inventories']
def _get_provider_aggregates(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s/aggregates' % rp_uuid,
version='1.1').body['aggregates']
def _post_resource_provider(self, rp_name):
return self.placement_api.post(
url='/resource_providers',
version='1.20', body={'name': rp_name}).body
def _set_inventory(self, rp_uuid, inv_body):
"""This will set the inventory for a given resource provider.
:param rp_uuid: UUID of the resource provider to update
:param inv_body: inventory to set on the provider
:returns: APIResponse object with the results
"""
return self.placement_api.post(
url=('/resource_providers/%s/inventories' % rp_uuid),
version='1.15', body=inv_body).body
def _get_resource_provider_by_uuid(self, rp_uuid):
return self.placement_api.get(
'/resource_providers/%s' % rp_uuid, version='1.15').body
def _set_aggregate(self, rp_uuid, agg_id):
provider = self.placement_api.get(
'/resource_providers/%s' % rp_uuid).body
post_agg_req = {"aggregates": [agg_id],
"resource_provider_generation": provider['generation']}
return self.placement_api.put(
'/resource_providers/%s/aggregates' % rp_uuid, version='1.19',
body=post_agg_req).body
def assertFlavorMatchesAllocation(self, flavor, allocation):
self.assertEqual(flavor['vcpus'], allocation['VCPU'])
self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
self.assertEqual(flavor['disk'], allocation['DISK_GB'])
def assertFlavorsMatchAllocation(self, old_flavor, new_flavor, allocation):
self.assertEqual(old_flavor['vcpus'] + new_flavor['vcpus'],
allocation['VCPU'])
self.assertEqual(old_flavor['ram'] + new_flavor['ram'],
allocation['MEMORY_MB'])
self.assertEqual(old_flavor['disk'] + new_flavor['disk'],
allocation['DISK_GB'])
def assertFlavorMatchesUsage(self, rp_uuid, flavor):
usages = self._get_provider_usages(rp_uuid)
self.assertFlavorMatchesAllocation(flavor, usages)
def get_migration_uuid_for_instance(self, instance_uuid):
# NOTE(danms): This is too much introspection for a test like this, but
# we can't see the migration uuid from the API, so we just encapsulate
# the peek behind the curtains here to keep it out of the tests.
# TODO(danms): Get the migration uuid from the API once it is exposed
ctxt = context.get_admin_context()
migrations = db.migration_get_all_by_filters(
ctxt, {'instance_uuid': instance_uuid})
self.assertEqual(1, len(migrations),
'Test expected a single migration, '
'but found %i' % len(migrations))
return migrations[0].uuid
def _boot_and_check_allocations(self, flavor, source_hostname):
"""Boot an instance and check that the resource allocation is correct
After booting an instance on the given host with a given flavor it
asserts that both the providers usages and resource allocations match
with the resources requested in the flavor. It also asserts that
running the periodic update_available_resource call does not change the
resource state.
:param flavor: the flavor the instance will be booted with
:param source_hostname: the name of the host the instance will be
booted on
:return: the API representation of the booted instance
"""
server_req = self._build_minimal_create_server_request(
self.api, 'some-server', flavor_id=flavor['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none')
server_req['availability_zone'] = 'nova:%s' % source_hostname
LOG.info('booting on %s', source_hostname)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(
self.admin_api, created_server, 'ACTIVE')
# Verify that our source host is what the server ended up on
self.assertEqual(source_hostname, server['OS-EXT-SRV-ATTR:host'])
source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
# Before we run periodics, make sure that we have allocations/usages
# only on the source host
source_usages = self._get_provider_usages(source_rp_uuid)
self.assertFlavorMatchesAllocation(flavor, source_usages)
# Check that the other providers have no usage
for rp_uuid in [self._get_provider_uuid_by_host(hostname)
for hostname in self.computes.keys()
if hostname != source_hostname]:
usages = self._get_provider_usages(rp_uuid)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, usages)
# Check that the server only allocates resource from the host it is
# booted on
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertEqual(1, len(allocations),
'No allocation for the server on the host it '
'is booted on')
allocation = allocations[source_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(flavor, allocation)
self._run_periodics()
# After running the periodics but before we start any other operation,
# we should have exactly the same allocation/usage information as
# before running the periodics
# Check usages on the selected host after boot
source_usages = self._get_provider_usages(source_rp_uuid)
self.assertFlavorMatchesAllocation(flavor, source_usages)
# Check that the server only allocates resource from the host it is
# booted on
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertEqual(1, len(allocations),
'No allocation for the server on the host it '
'is booted on')
allocation = allocations[source_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(flavor, allocation)
# Check that the other providers have no usage
for rp_uuid in [self._get_provider_uuid_by_host(hostname)
for hostname in self.computes.keys()
if hostname != source_hostname]:
usages = self._get_provider_usages(rp_uuid)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, usages)
return server
def _delete_and_check_allocations(self, server):
"""Delete the instance and asserts that the allocations are cleaned
:param server: The API representation of the instance to be deleted
"""
self.api.delete_server(server['id'])
self._wait_until_deleted(server)
# NOTE(gibi): The resource allocation is deleted after the instance is
# destroyed in the db so wait_until_deleted might return before the
# resources are deleted in placement. So we need to wait for the
# instance.delete.end notification as that is emitted after the
# resources are freed.
fake_notifier.wait_for_versioned_notifications('instance.delete.end')
for rp_uuid in [self._get_provider_uuid_by_host(hostname)
for hostname in self.computes.keys()]:
usages = self._get_provider_usages(rp_uuid)
self.assertEqual({'VCPU': 0,
'MEMORY_MB': 0,
'DISK_GB': 0}, usages)
# and no allocations for the deleted server
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertEqual(0, len(allocations))
def _run_periodics(self):
"""Run the update_available_resource task on every compute manager
This runs periodics on the computes in an undefined order; some child
classes redefine this function to force a specific order.
"""
ctx = context.get_admin_context()
for compute in self.computes.values():
LOG.info('Running periodic for compute (%s)',
compute.manager.host)
compute.manager.update_available_resource(ctx)
LOG.info('Finished with periodics')
def _move_and_check_allocations(self, server, request, old_flavor,
new_flavor, source_rp_uuid, dest_rp_uuid):
self.api.post_server_action(server['id'], request)
self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
def _check_allocation():
source_usages = self._get_provider_usages(source_rp_uuid)
self.assertFlavorMatchesAllocation(old_flavor, source_usages)
dest_usages = self._get_provider_usages(dest_rp_uuid)
self.assertFlavorMatchesAllocation(new_flavor, dest_usages)
# The instance should own the new_flavor allocation against the
# destination host created by the scheduler
allocations = self._get_allocations_by_server_uuid(server['id'])
self.assertEqual(1, len(allocations))
dest_alloc = allocations[dest_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(new_flavor, dest_alloc)
# The migration should own the old_flavor allocation against the
# source host created by conductor
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
allocations = self._get_allocations_by_server_uuid(migration_uuid)
source_alloc = allocations[source_rp_uuid]['resources']
self.assertFlavorMatchesAllocation(old_flavor, source_alloc)
# OK, so the move operation has run, but we have not yet confirmed or
# reverted the move operation. Before we run periodics, make sure
# that we have allocations/usages on BOTH the source and the
# destination hosts.
_check_allocation()
self._run_periodics()
_check_allocation()
# Make sure the RequestSpec.flavor matches the new_flavor.
ctxt = context.get_admin_context()
reqspec = objects.RequestSpec.get_by_instance_uuid(ctxt, server['id'])
self.assertEqual(new_flavor['id'], reqspec.flavor.flavorid)
def _migrate_and_check_allocations(self, server, flavor, source_rp_uuid,
dest_rp_uuid):
request = {
'migrate': None
}
self._move_and_check_allocations(
server, request=request, old_flavor=flavor, new_flavor=flavor,
source_rp_uuid=source_rp_uuid, dest_rp_uuid=dest_rp_uuid)
def _resize_and_check_allocations(self, server, old_flavor, new_flavor,
source_rp_uuid, dest_rp_uuid):
request = {
'resize': {
'flavorRef': new_flavor['id']
}
}
self._move_and_check_allocations(
server, request=request, old_flavor=old_flavor,
new_flavor=new_flavor, source_rp_uuid=source_rp_uuid,
dest_rp_uuid=dest_rp_uuid)
def _revert_resize(self, server):
self.api.post_server_action(server['id'], {'revertResize': None})
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
self._wait_for_migration_status(server, ['reverted'])
# Note that the migration status is changed to "reverted" in the
# dest host revert_resize method but the allocations are cleaned up
# in the source host finish_revert_resize method so we need to wait
# for the finish_revert_resize method to complete.
fake_notifier.wait_for_versioned_notifications(
'instance.resize_revert.end')
return server
| 42.025
| 79
| 0.624661
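Most of the helpers above need a full nova test environment, but the module-level generate_* functions are pure and can be exercised on their own. A minimal sketch, assuming the nova source tree is importable; the example names are hypothetical:

from nova.tests.functional.integrated_helpers import (  # assumes nova is installed from source
    generate_new_element,
    generate_random_numeric,
)

existing = {'server-ABC12345', 'server-XYZ67890'}  # hypothetical names already in use
print(generate_random_numeric(8))                  # e.g. '48210377'
print(generate_new_element(existing, 'server'))    # a fresh 'server...' name not present in `existing`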
|
fd2bf35d376397950bc6cf41079f665494c3ac0a
| 20,098
|
py
|
Python
|
harmonica/resource.py
|
Aquaveo/harmonica
|
da0e1ee2f8cecf965e74af117e77417bf48f55eb
|
[
"BSD-3-Clause"
] | null | null | null |
harmonica/resource.py
|
Aquaveo/harmonica
|
da0e1ee2f8cecf965e74af117e77417bf48f55eb
|
[
"BSD-3-Clause"
] | null | null | null |
harmonica/resource.py
|
Aquaveo/harmonica
|
da0e1ee2f8cecf965e74af117e77417bf48f55eb
|
[
"BSD-3-Clause"
] | null | null | null |
"""Resource managers for the tidal database models supported in harmonica."""
# 1. Standard python modules
from abc import ABCMeta, abstractmethod
import os
import shutil
import urllib.request
from zipfile import ZipFile
# 2. Third party modules
import xarray as xr
# 3. Aquaveo modules
# 4. Local modules
from harmonica import config
MAX_NUM_CONS = 37 # Maximum number of constituents in all available models
class Resources(object):
"""Abstract base class for model resources."""
def __init__(self):
"""Base constructor."""
pass
__metaclass__ = ABCMeta
@abstractmethod
def resource_attributes(self):
"""Get the resource attributes of a model (e.g. web url, compression type).
Returns:
dict: Dictionary of model resource attributes
"""
return {}
@abstractmethod
def dataset_attributes(self):
"""Get the dataset attributes of a model (e.g. unit multiplier, grid dimensions).
Returns:
dict: Dictionary of model dataset attributes
"""
return {}
@abstractmethod
def available_constituents(self):
"""Get all the available constituents of a model.
Returns:
list: List of all the available constituents
"""
return []
@abstractmethod
def constituent_groups(self):
"""Get all the available constituents of a model grouped by compatible file types.
Returns:
list[list]: 2-D list of available constituents, where the first dimension groups compatible files
"""
return []
@abstractmethod
def constituent_resource(self, con):
"""Get the resource name of a constituent.
Returns:
str: Name of the constituent's resource
"""
return None
class Tpxo8Resources(Resources):
"""TPXO8 resources."""
TPXO8_CONS = [
{ # 1/30 degree
'K1': 'hf.k1_tpxo8_atlas_30c_v1.nc',
'K2': 'hf.k2_tpxo8_atlas_30c_v1.nc',
'M2': 'hf.m2_tpxo8_atlas_30c_v1.nc',
'M4': 'hf.m4_tpxo8_atlas_30c_v1.nc',
'N2': 'hf.n2_tpxo8_atlas_30c_v1.nc',
'O1': 'hf.o1_tpxo8_atlas_30c_v1.nc',
'P1': 'hf.p1_tpxo8_atlas_30c_v1.nc',
'Q1': 'hf.q1_tpxo8_atlas_30c_v1.nc',
'S2': 'hf.s2_tpxo8_atlas_30c_v1.nc',
},
{ # 1/6 degree
'MF': 'hf.mf_tpxo8_atlas_6.nc',
'MM': 'hf.mm_tpxo8_atlas_6.nc',
'MN4': 'hf.mn4_tpxo8_atlas_6.nc',
'MS4': 'hf.ms4_tpxo8_atlas_6.nc',
},
]
def __init__(self):
"""Constructor."""
super().__init__()
def resource_attributes(self):
"""Returns a dict of the resource attributes that are disabled (TPXO8 is not freely available)."""
return {
'url': None, # Resources must already exist. Licensing restrictions prevent hosting files.
'archive': None,
}
def dataset_attributes(self):
"""Returns a dict of the TPXO8 dataset attributes (currently just 'units_muliplier')."""
return {
'units_multiplier': 0.001, # mm to meter
}
def available_constituents(self):
"""Returns a list of the constituents supported by the TPXO8 tidal database."""
# get keys from const groups as list of lists and flatten
return [c for sl in [grp.keys() for grp in self.TPXO8_CONS] for c in sl]
def constituent_groups(self):
"""Returns a list of the constituents supported by the TPXO8 tidal database (2-D)."""
return [self.TPXO8_CONS[0], self.TPXO8_CONS[1]]
def constituent_resource(self, con):
"""Get the filename given a constituent.
Args:
con (str): Name of the constituent to get resource filename of. See TPXO8_CONS for list of available
constituents
Returns:
str: The basename of the resource file if the specified constituent is supported by the TPXO8 tidal
database, else None
"""
con = con.upper()
for group in self.TPXO8_CONS:
if con in group:
return group[con]
return None
class Tpxo9Resources(Resources):
"""TPXO9 resources."""
TPXO9_CONS = {'2N2', 'K1', 'K2', 'M2', 'M4', 'MF', 'MM', 'MN4', 'MS4', 'N2', 'O1', 'P1', 'Q1', 'S1', 'S2'}
DEFAULT_RESOURCE_FILE = 'tpxo9_netcdf/h_tpxo9.v1.nc'
def __init__(self):
"""Constructor."""
super().__init__()
def resource_attributes(self):
"""Returns a dict of the resource attributes that are disabled (TPXO9 is not freely available)."""
return {
'url': None, # Resources must already exist. Licensing restrictions prevent hosting files.
'archive': 'gz',
}
def dataset_attributes(self):
"""Returns a dict of the TPXO9 dataset attributes (currently just 'units_muliplier')."""
return {
'units_multiplier': 1.0, # meter
}
def available_constituents(self):
"""Returns a list of the constituents supported by the TPXO9 tidal database."""
return self.TPXO9_CONS
def constituent_groups(self):
"""Returns a list of the constituents supported by the TPXO9 tidal database (2-D)."""
return [self.available_constituents()]
def constituent_resource(self, con):
"""Get the filename given a constituent.
Args:
con (str): Name of the constituent to get resource filename of. See TPXO9_CONS for list of available
constituents
Returns:
str: The basename of the resource file if the specified constituent is supported by the TPXO9 tidal
database, else None
"""
if con.upper() in self.TPXO9_CONS:
return self.DEFAULT_RESOURCE_FILE
else:
return None
class LeProvostResources(Resources):
"""LeProvost resources."""
LEPROVOST_CONS = {'K1', 'K2', 'M2', 'N2', 'O1', 'P1', 'Q1', 'S2', 'NU2', 'MU2', '2N2', 'T2', 'L2'}
DEFAULT_RESOURCE_FILE = 'leprovost_tidal_db.nc'
def __init__(self):
"""Constructor."""
super().__init__()
def resource_attributes(self):
"""Returns a dict of the resource attributes needed to fetch the freely available LeProvost tidal database."""
return {
'url': 'http://sms.aquaveo.com/leprovost_tidal_db.zip',
'archive': 'zip', # zip compression
}
def dataset_attributes(self):
"""Returns a dict of the LeProvost dataset attributes."""
return {
'units_multiplier': 1.0, # meter
'num_lats': 361,
'num_lons': 720,
'min_lon': -180.0,
}
def available_constituents(self):
"""Returns a list of the constituents supported by the LeProvost tidal database."""
return self.LEPROVOST_CONS
def constituent_groups(self):
"""Returns a list of the constituents supported by the LeProvost tidal database (2-D)."""
return [self.available_constituents()]
def constituent_resource(self, con):
"""Get the filename given a constituent.
Args:
con (str): Name of the constituent to get resource filename of. See LEPROVOST_CONS for list of available
constituents
Returns:
str: The basename of the resource file if the specified constituent is supported by the LeProvost tidal
database, else None
"""
if con.upper() in self.LEPROVOST_CONS:
return self.DEFAULT_RESOURCE_FILE
else:
return None
class FES2014Resources(Resources):
"""FES2014 resources."""
FES2014_CONS = {
'2N2': '2n2.nc',
'EPS2': 'eps2.nc',
'J1': 'j1.nc',
'K1': 'k1.nc',
'K2': 'k2.nc',
'L2': 'l2.nc',
'LA2': 'la2.nc',
'M2': 'm2.nc',
'M3': 'm3.nc',
'M4': 'm4.nc',
'M6': 'm6.nc',
'M8': 'm8.nc',
'MF': 'mf.nc',
'MKS2': 'mks2.nc',
'MM': 'mm.nc',
'MN4': 'mn4.nc',
'MS4': 'ms4.nc',
'MSF': 'msf.nc',
'MSQM': 'msqm.nc',
'MTM': 'mtm.nc',
'MU2': 'mu2.nc',
'N2': 'n2.nc',
'N4': 'n4.nc',
'NU2': 'nu2.nc',
'O1': 'o1.nc',
'P1': 'p1.nc',
'Q1': 'q1.nc',
'R2': 'r2.nc',
'S1': 's1.nc',
'S2': 's2.nc',
'S4': 's4.nc',
'SA': 'sa.nc',
'SSA': 'ssa.nc',
'T2': 't2.nc',
}
def __init__(self):
"""Constructor."""
super().__init__()
def resource_attributes(self):
"""Returns a dict of the resource attributes that are disabled (FES2014 is not freely available)."""
return {
'url': None, # Resources must already exist. Licensing restrictions prevent hosting files.
'archive': None,
}
def dataset_attributes(self):
"""Returns a dict of the FES2014 dataset attributes."""
return {
'units_multiplier': 1.0, # meter
'num_lats': 2881,
'num_lons': 5760,
'min_lon': 0.0,
}
def available_constituents(self):
"""Returns a list of the constituents supported by the FES2014 tidal database."""
return self.FES2014_CONS.keys()
def constituent_groups(self):
"""Returns a list of the constituents supported by the FES2014 tidal database (2-D)."""
return [self.available_constituents()]
def constituent_resource(self, con):
"""Get the filename given a constituent.
Args:
con (str): Name of the constituent to get resource filename of. See FES2014_CONS for list of available
constituents
Returns:
str: The basename of the resource file if the specified constituent is supported by the FES2014 tidal
database, else None
"""
con = con.upper()
if con in self.FES2014_CONS:
return self.FES2014_CONS[con]
else:
return None
class Adcirc2015Resources(Resources):
"""ADCIRC (v2015) resources."""
ADCIRC_CONS = {
'M2', 'S2', 'N2', 'K1', 'M4', 'O1', 'M6', 'Q1', 'K2', 'L2', '2N2', 'R2', 'T2', 'LAMBDA2', 'MU2',
'NU2', 'J1', 'M1', 'OO1', 'P1', '2Q1', 'RHO1', 'M8', 'S4', 'S6', 'M3', 'S1', 'MK3', '2MK3', 'MN4',
'MS4', '2SM2', 'MF', 'MSF', 'MM', 'SA', 'SSA'
}
DEFAULT_RESOURCE_FILE = 'all_adcirc.nc'
def __init__(self):
"""Constructor."""
super().__init__()
def resource_attributes(self):
"""Returns a dict of the resource attributes needed to fetch the freely available ADCIRC tidal database."""
return {
'url': 'http://sms.aquaveo.com/',
'archive': None, # Uncompressed NetCDF file
}
def dataset_attributes(self):
"""Returns a dict of the dataset attributes (currently only 'units_multiplier')."""
return {'units_multiplier': 1.0} # meter
def available_constituents(self):
"""Returns a list of the constituents supported by the ADCIRC tidal database."""
return self.ADCIRC_CONS
def constituent_groups(self):
"""Returns a list of the constituents supported by the ADCIRC tidal database (2-D)."""
return [self.available_constituents()]
def constituent_resource(self, con):
"""Get the filename given a constituent.
Args:
con (str): Name of the constituent to get resource filename of. See ADCIRC_CONS for list of available
constituents
Returns:
str: The basename of the resource file if the specified constituent is supported by the ADCIRC tidal
database, else None
"""
if con.upper() in self.ADCIRC_CONS:
return self.DEFAULT_RESOURCE_FILE
else:
return None
class ResourceManager(object):
"""Harmonica resource manager to retrieve and access tide models."""
RESOURCES = {
'tpxo8': Tpxo8Resources(),
'tpxo9': Tpxo9Resources(),
'leprovost': LeProvostResources(),
'fes2014': FES2014Resources(),
'adcirc2015': Adcirc2015Resources(),
}
TPXO_MODELS = {'tpxo8', 'tpxo9'}
LEPROVOST_MODELS = {'fes2014', 'leprovost'}
ADCIRC_MODELS = {'adcirc2015'}
DEFAULT_RESOURCE = 'tpxo9'
def __init__(self, model=DEFAULT_RESOURCE):
"""Constructor.
Args:
model (str): Name of the model to use initially. See the constants defined in ResourceManager for valid
values.
"""
if model not in self.RESOURCES:
raise ValueError('Model not recognized.')
self.model = model
self.model_atts = self.RESOURCES[self.model]
self.datasets = []
def __del__(self):
"""Deleter - closes Dataset file handles."""
for d in self.datasets:
for dset in d:
dset.close()
@staticmethod
def data_dir_exists(model):
"""Check if a model's data directory exists in either the default location or the configurable one.
Args:
model (str): Name of the model. See the constants defined in ResourceManager for valid values.
Returns:
bool: True if the model's data folder exists in either location.
"""
if os.path.isdir(os.path.join(config['data_dir'], model)):
return True # Exists in the default %APPDATA% folder
if os.path.isdir(os.path.join(config['pre_existing_data_dir'], model)):
return True # Exists in the user configurable folder
return False
@staticmethod
def available_models():
"""Get a dict of flags indicating whether a tidal model has been installed.
This just performs a quick check for the existence of model data folders. If you need a more robust
check, write something else. Needed something fast for dialogs.
Returns:
dict: {'model': True if available else False}
"""
return {model: ResourceManager.data_dir_exists(model) for model in ResourceManager.RESOURCES}
def available_constituents(self):
"""Returns a list of the available constituents for the current model."""
return self.model_atts.available_constituents()
def get_units_multiplier(self):
"""Returns the units multiplier for the current model."""
return self.model_atts.dataset_attributes()['units_multiplier']
def download(self, resource, destination_dir):
"""Download a specified model resource."""
if not os.path.isdir(destination_dir):
os.makedirs(destination_dir)
rsrc_atts = self.model_atts.resource_attributes()
url = rsrc_atts['url']
# Check if we can download resources for this model.
if url is None:
raise ValueError("Automatic fetching of resources is not available for the {} model.".format(self.model))
if rsrc_atts['archive'] is None:
url = "".join((url, resource))
print('Downloading resource: {}'.format(url))
path = os.path.join(destination_dir, resource)
with urllib.request.urlopen(url) as response:
if rsrc_atts['archive'] is not None:
if rsrc_atts['archive'] == 'gz':
import tarfile
try:
tar = tarfile.open(mode='r:{}'.format(rsrc_atts['archive']), fileobj=response)
except IOError as e:
print(str(e))
else:
rsrcs = set(
self.model_atts.constituent_resource(con) for con in
self.model_atts.available_constituents()
)
tar.extractall(path=destination_dir, members=[m for m in tar.getmembers() if m.name in rsrcs])
tar.close()
elif rsrc_atts['archive'] == 'zip': # Unzip .zip files
zip_file = os.path.join(destination_dir, os.path.basename(resource) + '.zip')
with open(zip_file, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
# Unzip the files
print("Unzipping files to: {}".format(destination_dir))
with ZipFile(zip_file, 'r') as unzipper:
# Extract all the files in the archive
unzipper.extractall(path=destination_dir)
print("Deleting zip file: {}".format(zip_file))
os.remove(zip_file) # delete the zip file
else:
with open(path, 'wb') as f:
f.write(response.read())
return path
def download_model(self, resource_dir=None):
"""Download all of the model's resources for later use."""
resources = set(
self.model_atts.constituent_resource(con) for con in
self.model_atts.available_constituents()
)
if not resource_dir:
resource_dir = os.path.join(config['data_dir'], self.model)
for r in resources:
path = os.path.join(resource_dir, r)
if not os.path.exists(path):
self.download(r, resource_dir)
return resource_dir
def remove_model(self):
"""Remove all of the model's resources."""
resource_dir = os.path.join(config['data_dir'], self.model)
if os.path.exists(resource_dir):
import shutil
shutil.rmtree(resource_dir, ignore_errors=True)
def get_datasets(self, constituents, filenames=None):
"""Returns a list of xarray datasets.
Args:
constituents (list[str]): List of the constituent names to retrieve datasets for
filenames (Optional[list[list[str]]]): Paths to the NetCDF files, parallel with return value if provided.
Only needed by the FES2014 model currently.
Returns:
list[list[Dataset]]: The xarray Datasets for the requested constituents
"""
available = self.available_constituents()
if any(const not in available for const in constituents):
raise ValueError('Constituent not recognized.')
# handle compatible files together
self.datasets = []
for const_group in self.model_atts.constituent_groups():
rsrcs = set(self.model_atts.constituent_resource(const) for const in set(constituents) & set(const_group))
paths = set()
if config['pre_existing_data_dir']:
missing = set()
for r in rsrcs:
path = os.path.join(config['pre_existing_data_dir'], self.model, r)
paths.add(path) if os.path.exists(path) else missing.add(r)
rsrcs = missing
if not rsrcs and paths:
paths_list = list(paths) # If the caller wants filenames, give them as parallel list with return.
self.datasets.append([xr.open_dataset(path) for path in paths_list])
if filenames is not None:
filenames.append(paths_list)
continue
resource_dir = os.path.join(config['data_dir'], self.model)
for r in rsrcs:
path = os.path.join(resource_dir, r)
if not os.path.exists(path):
self.download(r, resource_dir)
paths.add(path)
if paths:
paths_list = list(paths)
self.datasets.append([xr.open_dataset(path) for path in paths_list])
if filenames is not None: # If the caller wants the filenames, give them as parallel list with return.
filenames.append(paths_list)
return self.datasets
| 35.889286
| 119
| 0.584785
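A minimal usage sketch of the ResourceManager defined above, assuming the harmonica package is installed, its config points at a writable data_dir, and network access is available; the import path is inferred from the file location (harmonica/resource.py). Of the models in this module, only LeProvost and ADCIRC advertise a download URL:

from harmonica.resource import ResourceManager  # assumed import path

print(ResourceManager.available_models())        # quick existence check of each model's data folder
manager = ResourceManager(model='leprovost')     # LeProvost resources can be fetched automatically
print(sorted(manager.available_constituents()))  # constituents supported by the LeProvost database
datasets = manager.get_datasets(['M2', 'S2'])    # downloads leprovost_tidal_db.nc on first use, then opens it
print(manager.get_units_multiplier())            # 1.0 (metres) for LeProvost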
|
dc8cc06e219e2fb70bcf9991382b75c47362d60a
| 2,689
|
py
|
Python
|
portal/admin.py
|
dragetd/LambdaCast
|
a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7
|
[
"BSD-2-Clause"
] | 6
|
2015-04-05T01:28:23.000Z
|
2022-02-06T17:29:47.000Z
|
portal/admin.py
|
dragetd/LambdaCast
|
a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7
|
[
"BSD-2-Clause"
] | 2
|
2022-01-05T23:07:10.000Z
|
2022-03-30T17:52:45.000Z
|
portal/admin.py
|
dragetd/LambdaCast
|
a8227d8d19a2fdb1ff1d5e8ad7366d60a1e253f7
|
[
"BSD-2-Clause"
] | 2
|
2022-02-06T17:29:53.000Z
|
2022-02-26T17:23:09.000Z
|
from portal.models import MediaItem, MediaFile, Comment, Channel, Hotfolder, Collection, Submittal
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django import forms
from django.forms.widgets import Select
class MediaItemForm(forms.ModelForm):
class Meta:
widgets = {
'license': Select(attrs={'class': 'vLargeTextField'}),
}
def make_published(modeladmin, request, queryset):
queryset.update(published=True)
make_published.short_description = _(u"Publish marked media")
def make_torrent_done(modeladmin, request, queryset):
queryset.update(torrentDone=True)
make_torrent_done.short_description = _(u"Marked media all get a torrent")
class MediaFileInline(admin.TabularInline):
model = MediaFile
extra = 0
fieldsets = (
(None, {
'fields': ('title', 'url', 'file_format', 'media_item')
}),
)
class MediaItemAdmin (admin.ModelAdmin):
form = MediaItemForm
list_display = ['title','published','encodingDone','channel','date']
ordering = ['-date','-created']
actions = [make_published,make_torrent_done]
list_filter = ('published', 'channel')
fieldsets = (
(None, {
'fields': ('title', 'date', 'description', 'channel', 'license', 'linkURL', 'tags', 'published')
}),
(_(u'Advanced options'), {
'classes': ('collapse',),
'fields': ('user','torrentURL','videoThumbURL','audioThumbURL','duration','autoPublish','encodingDone','torrentDone')
}),
)
inlines = [
MediaFileInline,
]
admin.site.register(MediaItem,MediaItemAdmin)
def make_moderated(modeladmin,request, queryset):
queryset.update(moderated=True)
make_moderated.short_description = _(u"Moderate marked comments")
class CommentAdmin (admin.ModelAdmin):
list_display = ['comment','item','created','name','ip','moderated']
ordering = ['-created']
actions = [make_moderated]
admin.site.register(Comment,CommentAdmin)
class ChannelAdmin (admin.ModelAdmin):
list_display = ['name','description','featured']
ordering = ['-created']
admin.site.register(Channel,ChannelAdmin)
class HotfolderAdmin (admin.ModelAdmin):
list_display = ['folderName','activated','autoPublish','channel']
ordering = ['-created']
admin.site.register(Hotfolder,HotfolderAdmin)
class CollectionAdmin(admin.ModelAdmin):
list_display = ['title','date','channel']
ordering = ['-date','-created']
admin.site.register(Collection,CollectionAdmin)
class SubmittalAdmin(admin.ModelAdmin):
list_display = ['title','description']
admin.site.register(Submittal,SubmittalAdmin)
| 31.635294
| 129
| 0.689476
|
8ab53045a23dfd564f0ae15e5536061d94539885
| 286
|
py
|
Python
|
mod_async/__init__.py
|
lgfrbcsgo/wot-async
|
53cb44246944463e29567f5c7fc6440990252e61
|
[
"MIT"
] | 1
|
2020-10-13T07:11:22.000Z
|
2020-10-13T07:11:22.000Z
|
mod_async/__init__.py
|
lgfrbcsgo/wot-async
|
53cb44246944463e29567f5c7fc6440990252e61
|
[
"MIT"
] | null | null | null |
mod_async/__init__.py
|
lgfrbcsgo/wot-async
|
53cb44246944463e29567f5c7fc6440990252e61
|
[
"MIT"
] | null | null | null |
from mod_async.async import (
AsyncMutex,
AsyncSemaphore,
AsyncValue,
CallbackCancelled,
Return,
async_task,
auto_run,
from_adisp,
from_future,
run,
)
try:
from mod_async.bw import TimeoutExpired, delay, timeout
except ImportError:
pass
| 15.888889
| 59
| 0.678322
|
0488e4fcf8c51843a3d62fdf711983ce793a98b4
| 4,260
|
py
|
Python
|
tests/sagemaker/test_multi_node_data_parallel.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 101
|
2021-12-22T00:03:51.000Z
|
2022-03-30T07:39:09.000Z
|
tests/sagemaker/test_multi_node_data_parallel.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 13
|
2020-10-13T11:41:11.000Z
|
2022-02-16T14:13:31.000Z
|
tests/sagemaker/test_multi_node_data_parallel.py
|
MarcelGM/transformers
|
aad1d9b6d5c58fd974618ac0aead1c5bd1119467
|
[
"Apache-2.0"
] | 30
|
2021-04-30T07:11:22.000Z
|
2022-03-15T19:34:58.000Z
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
]
)
class MultiNodeTest(unittest.TestCase):
def setUp(self):
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
encoding="utf-8",
check=True,
)
assert hasattr(self, "env")
def create_estimator(self, instance_count):
job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script,
source_dir=self.env.test_path,
role=self.env.role,
image_uri=self.env.image_uri,
base_job_name=job_name,
instance_count=instance_count,
instance_type=self.instance_type,
debugger_hook_config=False,
hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
metric_definitions=self.env.metric_definitions,
distribution=distribution,
py_version="py36",
)
def save_results_as_csv(self, job_name):
TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
# @parameterized.expand([(2,), (4,),])
@parameterized.expand([(2,)])
def test_script(self, instance_count):
# create estimator
estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 38.378378
| 118
| 0.639906
|
5f55c3a286a8866892185c6846e90c3400ebfa44
| 19
|
py
|
Python
|
vedh/ved.py
|
Vedhharale/Mail_Automatin
|
1af7f6484f95244171a99afef60f20d5dfc2cafc
|
[
"MIT"
] | null | null | null |
vedh/ved.py
|
Vedhharale/Mail_Automatin
|
1af7f6484f95244171a99afef60f20d5dfc2cafc
|
[
"MIT"
] | null | null | null |
vedh/ved.py
|
Vedhharale/Mail_Automatin
|
1af7f6484f95244171a99afef60f20d5dfc2cafc
|
[
"MIT"
] | null | null | null |
a=1
b=2
print(a+b)
| 4.75
| 10
| 0.578947
|