| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24 (nullable) |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24 (nullable) |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24 (nullable) |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 3523f7a4e21ce7b43e6d91a635c407f4186ed4cb | size: 4,766 | ext: py | lang: Python
max_stars: anyway/widgets/suburban_widgets/accident_type_vehicle_type_road_comparison_widget.py | volsky/anyway @ 5d5a2600723392f1a55116a3b5d5b1f28a3ed029 | licenses ["MIT"] | count 1 | 2022-01-19T18:23:03.000Z to 2022-01-19T18:23:03.000Z
max_issues: anyway/widgets/suburban_widgets/accident_type_vehicle_type_road_comparison_widget.py | volsky/anyway @ 5d5a2600723392f1a55116a3b5d5b1f28a3ed029 | licenses ["MIT"] | count null | dates null
max_forks: anyway/widgets/suburban_widgets/accident_type_vehicle_type_road_comparison_widget.py | volsky/anyway @ 5d5a2600723392f1a55116a3b5d5b1f28a3ed029 | licenses ["MIT"] | count null | dates null
import datetime
import logging
from typing import List, Dict
from flask_babel import _
from sqlalchemy import func, distinct, desc
from anyway.request_params import RequestParams
from anyway.app_and_db import db
from anyway.widgets.widget_utils import get_query, run_query
from anyway.models import VehicleMarkerView, AccidentType
from anyway.vehicle_type import VehicleCategory
from anyway.widgets.suburban_widgets.sub_urban_widget import SubUrbanWidget
# TODO: register?
class AccidentTypeVehicleTypeRoadComparisonWidget(SubUrbanWidget):
name: str = "vehicle_accident_vs_all_accidents" # WIP: change by vehicle type
MAX_ACCIDENT_TYPES_TO_RETURN: int = 5
def __init__(self, request_params: RequestParams):
super().__init__(request_params, type(self).name)
self.road_number: str = request_params.location_info["road1"]
# WIP: change rank, text by vehicle type
self.rank = 25
def generate_items(self) -> None:
self.items = AccidentTypeVehicleTypeRoadComparisonWidget.accident_type_road_vs_all_count(
self.request_params.start_time, self.request_params.end_time, self.road_number
)
@staticmethod
def accident_type_road_vs_all_count(
start_time: datetime.date, end_time: datetime.date, road_number: str
) -> List:
num_accidents_label = "num_of_accidents"
location_all = "כל הארץ"
location_road = f"כביש {int(road_number)}"
vehicle_types = VehicleCategory.MOTORCYCLE.get_codes() # WIP: change by vehicle type
all_roads_query = (
AccidentTypeVehicleTypeRoadComparisonWidget.get_accident_count_by_vehicle_type_query(
start_time, end_time, num_accidents_label, vehicle_types
)
)
all_roads_query_result = run_query(all_roads_query)
all_roads_sum_accidents = 0
all_roads_map = {}
for record in all_roads_query_result:
all_roads_sum_accidents += record[num_accidents_label]
all_roads_map[record[VehicleMarkerView.accident_type.name]] = record[
num_accidents_label
]
road_query = all_roads_query.filter(
(VehicleMarkerView.road1 == road_number) | (VehicleMarkerView.road2 == road_number)
)
road_query_result = run_query(road_query)
road_sum_accidents = 0
types_to_report = []
for record in road_query_result:
road_sum_accidents += record[num_accidents_label]
for record in road_query_result:
if (
len(types_to_report)
== AccidentTypeVehicleTypeRoadComparisonWidget.MAX_ACCIDENT_TYPES_TO_RETURN
):
break
accident_type = record[VehicleMarkerView.accident_type.name]
types_to_report.append(
{
VehicleMarkerView.accident_type.name: accident_type,
location_road: record[num_accidents_label] / road_sum_accidents,
location_all: all_roads_map[accident_type] / all_roads_sum_accidents,
}
)
return types_to_report
@staticmethod
def get_accident_count_by_vehicle_type_query(
start_time: datetime.date,
end_time: datetime.date,
num_accidents_label: str,
vehicle_types: List[int],
) -> db.session.query:
return (
get_query(
table_obj=VehicleMarkerView,
start_time=start_time,
end_time=end_time,
filters={VehicleMarkerView.vehicle_type.name: vehicle_types},
)
.with_entities(
VehicleMarkerView.accident_type,
func.count(distinct(VehicleMarkerView.provider_and_id)).label(num_accidents_label),
)
.group_by(VehicleMarkerView.accident_type)
.order_by(desc(num_accidents_label))
)
@staticmethod
def localize_items(request_params: RequestParams, items: Dict) -> Dict:
for item in items["data"]["items"]:
try:
item[VehicleMarkerView.accident_type.name] = _(
AccidentType(item["accident_type"]).get_label()
)
except KeyError:
logging.exception(
f"AccidentTypeVehicleTypeRoadComparisonWidget.localize_items: Exception while translating {item}."
)
items["data"]["text"] = {
# TODO: after registering decide on title
"title": "Number of accidents by vehicle type by severity"
}
return items
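# Illustrative sketch only (not part of the original widget): the list built by
# accident_type_road_vs_all_count() pairs each accident type with its share of
# accidents on the requested road and its share over all roads. The road number
# and the fractions below are made-up values for illustration.
if __name__ == "__main__":
    example_items = [
        {"accident_type": 1, "כביש 90": 0.42, "כל הארץ": 0.31},
        {"accident_type": 5, "כביש 90": 0.18, "כל הארץ": 0.22},
    ]
    for example_item in example_items:
        print(example_item)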
avg_line_length: 40.05042 | max_line_length: 119 | alphanum_fraction: 0.638271

hexsha: ec2309b1674981efc3ac923b77b4cee1c9c1927c | size: 34,578 | ext: py | lang: Python
max_stars: brouillon.py | AlexandreFiche/machine_learning_for_autonomous_driving @ e2749e7408cb4c26ed94a69b66c975e641c33838 | licenses ["MIT"] | count 1 | 2020-07-22T09:13:17.000Z to 2020-07-22T09:13:17.000Z
max_issues: brouillon.py | AlexandreFiche/machine_learning_for_autonomous_driving @ e2749e7408cb4c26ed94a69b66c975e641c33838 | licenses ["MIT"] | count null | dates null
max_forks: brouillon.py | AlexandreFiche/machine_learning_for_autonomous_driving @ e2749e7408cb4c26ed94a69b66c975e641c33838 | licenses ["MIT"] | count 1 | 2021-04-16T13:05:43.000Z to 2021-04-16T13:05:43.000Z
# Scratch file to store code snippets that may be reused
# 15/06
# 1
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
anntoken = "bc3180b07f8e4a728f504ded654df56f"
ann_record = nusc.get('sample_annotation',anntoken)
sample_record = nusc.get('sample', ann_record['sample_token'])
boxes, cam = [], []
cams = [key for key in sample_record['data'].keys() if 'CAM' in key]
print(cams)
inst_token = nusc.get('instance',ann_record['instance_token'])
print(inst_token)
cams_check = []
for cam in cams:
_, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[anntoken])
if len(boxes) > 0:
cams_check += [cam]
print(cams_check)
#['CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_FRONT_LEFT']
#{'token': 'c1958768d48640948f6053d04cffd35b', 'category_token': 'fd69059b62a3469fbaef25340c0eab7f', 'nbr_annotations': 39, 'first_annotation_token': '49f76277d07541c5a584aa14c9d28754', 'last_annotation_token': 'bc3180b07f8e4a728f504ded654df56f'}
#['CAM_FRONT', 'CAM_FRONT_LEFT']
# in this extract I forgot to update sample_record at each loop iteration
# normally boxes is supposed to be empty except when we are on the right sample of the right annotation
# but no, almost every time boxes were returned, and I did not find out why.
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
def find_vehicle_follow(instance_token):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_annotation'])
cams_check = []
for cam in cams:
_, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0:
cams_check += [cam]
print(cams_check)
curr_token = curr_ann['next']
#nusc.render_annotation(curr_token)
find_vehicle_follow("c1958768d48640948f6053d04cffd35b")
# v2, 2pm: switching to another method (not this code):
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
# returns true and a filled dataframe if there is a vehicle in front, false and an empty dataframe otherwise
def find_vehicle_in_front(instance_token,utime):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
    # currently without the speed of the vehicle in front
    columns = ["distance", "throttle", "ego_speed"]
dataframe = pd.DataFrame(columns=columns)
rows_list = []
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_token'])
cams_check = []
        # collect the cameras that saw the annotation
for cam in cams:
_, boxes, _ = nusc.get_sample_data(curr_sample['data'][cam], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0:
cams_check += [cam]
#print(cams_check)
curr_token = curr_ann['next']
        # compute the distance between ego and the vehicle
lidar = nusc.get('sample_data',curr_sample['data']['LIDAR_TOP'])
ego_pos = nusc.get('ego_pose',lidar['ego_pose_token'])
dist = np.linalg.norm(np.array(ego_pos['translation']) - np.array(curr_ann['translation']))
#print(dist)
dic = {'distance':dist}
rows_list += [dic]
print(curr_sample["timestamp"] in utime)
print(curr_sample["timestamp"])
print(len(rows_list))
dic_scene = nusc_can.get_messages(scene_test['name'],'vehicle_monitor')
utime = [ d["utime"] for d in dic_scene ]
print(len(utime))
print(utime)
find_vehicle_in_front("c1958768d48640948f6053d04cffd35b",utime)
scene_test = nusc.scene[58]
dic_scene = nusc_can.get_messages(scene_test['name'],'vehicle_monitor')
features = ["vehicle_speed","steering","throttle","left_signal","right_signal"]
df_scene = pd.DataFrame.from_dict(dic_scene)[features]
#dic_scene
#
last = nusc.get('sample',scene['last_sample_token'])
while(curr_sample['timestamp'] < last['timestamp']):
#print(curr_sample['timestamp'] )
list_sample += [curr_sample['timestamp']]
curr_sample = nusc.get('sample',curr_sample['next'])
i += 1
print(i)
print(curr_sample['timestamp'])
print(len(utime[i:]))
print([list_sample[i] - list_sample[i+1] for i in range(len(list_sample)-1)])
###
# June 16
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
# returns a list of ego-vehicle info records (same count as the number of annotations)
# by default these counts can differ because the timestamps are not the same
def get_list_info(instance_token):
instance = nusc.get('instance',instance_token)
ann = nusc.get('sample_annotation',instance["first_annotation_token"])
sample = nusc.get('sample',ann['sample_token'])
scene = nusc.get('scene',sample['scene_token'])
dict_scene = nusc_can.get_messages(scene['name'],'vehicle_monitor')
curr_sample = sample
i = 0
list_info = []
last = nusc.get('sample',scene['last_sample_token'])
while(curr_sample['timestamp'] < last['timestamp']):
if(curr_sample['timestamp'] > dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
curr_sample = nusc.get('sample',curr_sample['next'])
if(curr_sample['timestamp'] < dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
return list_info
# returns true and a filled list if the instance is in front of ego
def find_vehicle_in_front(instance_token):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
info_list = get_list_info(instance_token)
rows_list = []
i = 0
    # For each record of the annotation, add a row with the elements
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_token'])
cams_check = []
        # collect the cameras that saw the annotation
_, boxes, _ = nusc.get_sample_data(curr_sample['data']['CAM_FRONT'], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0 and abs(info_list[i]['steering']) < 100:
            # compute the distance between ego and the vehicle
lidar = nusc.get('sample_data',curr_sample['data']['LIDAR_TOP'])
ego_pos = nusc.get('ego_pose',lidar['ego_pose_token'])
dist = np.linalg.norm(np.array(ego_pos['translation']) - np.array(curr_ann['translation']))
dic = {'distance':dist,'throttle':info_list[i]['throttle'],'ego_speed':info_list[i]['vehicle_speed']
,'brake':info_list[i]['brake']}
rows_list += [dic]
curr_token = curr_ann['next']
i +=1
    # print(len(rows_list), " rows added")
return len(rows_list)!=0,rows_list
out.release()
blackint = nusc_can.can_blacklist
blacklist = [ "scene-0"+ str(i) for i in blackint]
# List all the instances of a scene
def get_instances_scene(scene):
sample = nusc.get('sample',scene['first_sample_token'])
list_instances = []
while sample['token'] != scene['last_sample_token']:
anns = sample['anns']
for ann_token in anns:
ann = nusc.get('sample_annotation',ann_token)
instance = nusc.get('instance',ann['instance_token'])
category = nusc.get('category',instance['category_token'])
if not instance in list_instances and "vehicle" in category['name']:
#print(category['name'])
list_instances += [instance]
sample = nusc.get('sample',sample['next'])
return list_instances
# Explore each scene, then each instance of that scene that is a moving vehicle (in front)
# Create a dataframe with entries: distance to the vehicle, ego_speed, ego_accel, ego_brake
# and vehicle_speed (not for now)
def build_dataframe_for_vehicle_in_front():
scenes = nusc.scene[:100]
list_rows = []
for s in scenes:
if s not in blacklist and s not in ["scene-003"]:
list_instances = get_instances_scene(s)
for inst in list_instances:
ok, res = find_vehicle_in_front(inst['token'])
if ok:
list_rows += res
dataframe = pd.DataFrame.from_dict(list_rows)
print(dataframe)
print(dataframe.describe())
return dataframe
#find_vehicle_in_front("c1958768d48640948f6053d04cffd35b")
# 15k rows without the steering constraint (100 scenes)
df_vehicle = build_dataframe_for_vehicle_in_front()
# June 17, preprocessing changes: saving the functions
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
# returns a list of ego-vehicle info records (same count as the number of annotations)
# by default these counts can differ because the timestamps are not the same
def get_list_info(instance_token):
instance = nusc.get('instance',instance_token)
ann = nusc.get('sample_annotation',instance["first_annotation_token"])
sample = nusc.get('sample',ann['sample_token'])
scene = nusc.get('scene',sample['scene_token'])
dict_scene = nusc_can.get_messages(scene['name'],'vehicle_monitor')
curr_sample = sample
i = 0
list_info = []
last = nusc.get('sample',scene['last_sample_token'])
while(curr_sample['timestamp'] < last['timestamp']):
if(curr_sample['timestamp'] > dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
curr_sample = nusc.get('sample',curr_sample['next'])
if(curr_sample['timestamp'] < dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
return list_info
# renvoie vrai et un un tableau rempli si l'instance est en face d'ego
def find_vehicle_in_front(instance_token):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
info_list = get_list_info(instance_token)
rows_list = []
i = 0
    # For each record of the annotation, add a row with the elements
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_token'])
        scene = nusc.get('scene',curr_sample['scene_token'])
cams_check = []
        # collect the cameras that saw the annotation
_, boxes, _ = nusc.get_sample_data(curr_sample['data']['CAM_FRONT'], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0 and abs(info_list[i]['steering']) < 100:
            # compute the distance between ego and the vehicle
lidar = nusc.get('sample_data',curr_sample['data']['LIDAR_TOP'])
ego_pos = nusc.get('ego_pose',lidar['ego_pose_token'])
dist = np.linalg.norm(np.array(ego_pos['translation']) - np.array(curr_ann['translation']))
dic = {'distance':dist,'throttle':info_list[i]['throttle'],'ego_speed':info_list[i]['vehicle_speed']
,'brake':info_list[i]['brake'],'future_throttle':info_list[i+1]['throttle'],'future_brake':info_list[i+1]['brake']}
rows_list += [dic]
if info_list[i]['brake'] > 10:
#print(scene['name'])
pass
curr_token = curr_ann['next']
i +=1
    # print(len(rows_list), " rows added")
return len(rows_list)!=0,rows_list
def show_infos(dataframe,num_frame):
if num_frame < taille:
cv2.putText(im, 'vitesse:'+ str(dataframe.at[int(num_frame/25),'ego_speed']),
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
def gestion(dataframe):
i = 0
nb_ligne = dataframe.shape[0]
sample = nusc.get('sample',scene['first_sample_token'])
list_instances = []
while sample['token'] != scene['last_sample_token']:
#print(sample['timestamp'],' a ')
df = dataframe[dataframe['timestamp'] == sample['timestamp']]
i += 1
if i == 6:
i = 0
sample = nusc.get('sample',sample['next'])
# returns true and a filled list if the instance is in front of ego
def find_vehicle_in_front(instance_token):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
info_list = get_list_info(instance_token)
rows_list = []
i = 0
    # For each record of the annotation, add a row with the elements
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_token'])
        scene = nusc.get('scene',curr_sample['scene_token'])
cams_check = []
        # collect the cameras that saw the annotation
_, boxes, _ = nusc.get_sample_data(curr_sample['data']['CAM_FRONT'], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0 and abs(info_list[i]['steering']) < 100:
            # compute the distance between ego and the vehicle
lidar = nusc.get('sample_data',curr_sample['data']['LIDAR_TOP'])
ego_pos = nusc.get('ego_pose',lidar['ego_pose_token'])
dist = np.linalg.norm(np.array(ego_pos['translation']) - np.array(curr_ann['translation']))
dic = {'scene':scene['name'],'timestamp':curr_sample['timestamp'],'inst_token':instance_token,'ann_token':curr_token,'distance':round(dist,3),'throttle':info_list[i]['throttle'],'ego_speed':round(info_list[i]['vehicle_speed'],3)
,'brake':info_list[i]['brake'],'future_throttle':info_list[i+1]['throttle'],'future_brake':info_list[i+1]['brake']}
rows_list += [dic]
if info_list[i]['brake'] > 10:
#print(scene['name'])
pass
curr_token = curr_ann['next']
i +=1
# June 18
# returns true and a filled list if the instance is in front of ego
def find_vehicle_in_front_b(instance_token):
instance = nusc.get('instance',instance_token)
last_token = instance["last_annotation_token"]
curr_token = instance["first_annotation_token"]
info_list = get_list_info(instance_token)
rows_list = []
i = 0
    # For each record of the annotation, add a row with the elements
while curr_token != last_token:
curr_ann = nusc.get('sample_annotation',curr_token)
curr_sample = nusc.get('sample',curr_ann['sample_token'])
        scene = nusc.get('scene',curr_sample['scene_token'])
cams_check = []
        # collect the cameras that saw the annotation
_, boxes, _ = nusc.get_sample_data(curr_sample['data']['CAM_FRONT'], box_vis_level=BoxVisibility.ANY,
selected_anntokens=[curr_token])
if len(boxes) > 0 and abs(info_list[i]['steering']) < 100:
            # compute the distance between ego and the vehicle
lidar = nusc.get('sample_data',curr_sample['data']['LIDAR_TOP'])
ego_pos = nusc.get('ego_pose',lidar['ego_pose_token'])
dist = np.linalg.norm(np.array(ego_pos['translation']) - np.array(curr_ann['translation']))
dic = {'scene':scene['name'],'timestamp':curr_sample['timestamp'],'inst_token':instance_token,'ann_token':curr_token,'distance':round(dist,3),'throttle':info_list[i]['throttle'],'ego_speed':round(info_list[i]['vehicle_speed'],3)
,'brake':info_list[i]['brake'],'future_throttle':info_list[i+1]['throttle'],'future_brake':info_list[i+1]['brake']}
rows_list += [dic]
if info_list[i]['brake'] > 10:
#print(scene['name'])
pass
curr_token = curr_ann['next']
i +=1
print(len(rows_list),len(info_list))
return len(rows_list)!=0,rows_list
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility
def get_list_info_v2(instance_token):
instance = nusc.get('instance',instance_token)
ann = nusc.get('sample_annotation',instance["first_annotation_token"])
sample = nusc.get('sample',ann['sample_token'])
scene = nusc.get('scene',sample['scene_token'])
dict_scene = nusc_can.get_messages(scene['name'],'vehicle_monitor')
curr_sample = sample
i = 0
list_info = []
last = nusc.get('sample',scene['last_sample_token'])
while curr_sample['timestamp'] <= dict_scene[i]['utime']:
i += 1
curr_sample = nusc.get('sample',curr_sample['next'])
while(curr_sample['timestamp'] < last['timestamp']):
if(curr_sample['timestamp'] > dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
curr_sample = nusc.get('sample',curr_sample['next'])
if(curr_sample['timestamp'] < dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
return list_info
# 1532402936198962 1532402936699359 1532402937198682
# returns a list of ego-vehicle info records (same count as the number of annotations)
# by default these counts can differ because the timestamps are not the same
def get_list_info(instance_token):
instance = nusc.get('instance',instance_token)
ann = nusc.get('sample_annotation',instance["first_annotation_token"])
sample = nusc.get('sample',ann['sample_token'])
scene = nusc.get('scene',sample['scene_token'])
dict_scene = nusc_can.get_messages(scene['name'],'vehicle_monitor')
curr_sample = sample
i = 0
list_info = []
last = nusc.get('sample',scene['last_sample_token'])
while(curr_sample['timestamp'] < last['timestamp']):
if(curr_sample['timestamp'] > dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
curr_sample = nusc.get('sample',curr_sample['next'])
if(curr_sample['timestamp'] < dict_scene[i]['utime'] and i < len(dict_scene)-1):
i += 1
list_info += [dict_scene[i]]
#print([ e['utime'] - 1532402900000000 for e in dict_scene])
return list_info
# June 19
df_ego = df[df['inst_token'] == "vehicle_info"]
#df_ego
list_vec = [(df_ego.iloc[i+1]['ego_pos'][0] - df_ego.iloc[i]['ego_pos'][0],
df_ego.iloc[i+1]['ego_pos'][1] - df_ego.iloc[i]['ego_pos'][1])
for i in range(df_ego.shape[0]-1) ]
list_vitesse = [df_ego.iloc[i]['ego_speed'] for i in range(df_ego.shape[0]-1)]
list_vec_norm = [ (v[0]/np.sqrt((v[0]*v[0] + v[1]*v[1])),v[1]/np.sqrt((v[0]*v[0] + v[1]*v[1])))
for v in list_vec ]
#print(list_vec)
list_vec_norm
for i in range(df_ego.shape[0]-1):
# tuple(map(operator.add, df_ego.iloc[i]['ego_pos'],
r = [e * list_vitesse[i]/3.6*0.5 for e in list_vec_norm[i]]
#print(list_vec_norm[i])
new_pos = list(map(operator.add, df_ego.iloc[i]['ego_pos'],r))
new_pos = [round(e,3) for e in new_pos]
print(new_pos,df_ego.iloc[i+1]['ego_pos'])
# June 23
for box in boxes:
#angle = 0
if (((box.center[0] > -2 - angle and box.center[0] < 2 - angle and box.center[2] < 20) or
(box.center[0] > -3 - angle and box.center[0] < 3 - angle and box.center[2] < 40 and box.center[2] > 20) or
(box.center[0] > -6 - angle and box.center[0] < 6 - angle and box.center[2] > 40)) and "vehicle" in box.name
and box.center[2] < dmin):
dmin = box.center[2]
minbox = box
# Display information
if sample['token'] != scene['last_sample_token']:
if not df_curr.empty:
#print("passe")
if dmin != 999:
cv2.line(im, (int(800+minbox.center[0]*20), 100), (int(800+minbox.center[0]*20), 800), (255, 255, 0), thickness=2)
cv2.putText(im, 'Center:'+ str(round(minbox.center[0],3))+"\n "+str(round(minbox.center[2],2)),
(int(800+minbox.center[0]*10),250),
font,
fontScale,
(255, 0, 255),
lineType)
cv2.line(im, (int(1200-angle*20), 100), (int(1200-angle*20), 800), (255, 0, 0), thickness=2)
cv2.line(im, (int(400-angle*20), 100), (int(400-angle*20), 800), (255, 0, 0), thickness=2)
import cv2
from typing import Tuple, List
import os.path as osp
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix
import operator
# parameters for cv2
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (0,500)
fontScale = 1
fontColor = (255,255,255)
color = (255,0,0)
lineType = 2
pas = (0,50)
def get_color(category_name: str) -> Tuple[int, int, int]:
"""
Provides the default colors based on the category names.
This method works for the general nuScenes categories, as well as the nuScenes detection categories.
"""
if 'bicycle' in category_name or 'motorcycle' in category_name:
return 255, 61, 99 # Red
elif 'vehicle' in category_name or category_name in ['bus', 'car', 'construction_vehicle', 'trailer', 'truck']:
return 255, 158, 0 # Orange
elif 'pedestrian' in category_name:
return 0, 0, 230 # Blue
elif 'cone' in category_name or 'barrier' in category_name:
return 0, 0, 0 # Black
else:
return 255, 0, 255 # Magenta
def affichage(im,df_curr):
cv2.putText(im, 'Vitesse:'+ str(df_curr.iat[0,9]),
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
cv2.putText(im, 'Angle volant:'+ str(df_curr.iat[0,8]/20),
tuple(map(operator.add, bottomLeftCornerOfText,(0,50))),
font,
fontScale,
fontColor,
lineType)
cv2.putText(im, 'Acceleration:'+ str(df_curr.iat[0,10]),
tuple(map(operator.add, bottomLeftCornerOfText,(0,100))),
font,
fontScale,
fontColor,
lineType)
cv2.putText(im, 'Frein:'+ str(df_curr.iat[0,11]),
tuple(map(operator.add, bottomLeftCornerOfText,(0,150))),
font,
fontScale,
fontColor,
lineType)
cv2.putText(im, 'Acceleration (Pred):'+ str(df_curr.iat[0,12]),
tuple(map(operator.add, bottomLeftCornerOfText,(0,200))),
font,
fontScale,
fontColor,
lineType)
cv2.putText(im, 'Frein (Pred):'+ str(df_curr.iat[0,11]),
tuple(map(operator.add, bottomLeftCornerOfText,(0,250))),
font,
fontScale,
fontColor,
lineType)
if df_curr.shape[0] > 1:
cv2.putText(im, 'Distance:'+ str(df_curr.iloc[1]['distance']),
tuple(map(operator.add, bottomLeftCornerOfText,(0,300))),
font,
fontScale,
color,
lineType)
def draw_rect(im,selected_corners, color):
prev = selected_corners[-1]
for corner in selected_corners:
cv2.line(im,
(int(prev[0]), int(prev[1])),
(int(corner[0]), int(corner[1])),
color, 2)
prev = corner
def render_scene_channel_with_predict(nusc,
scene_token: str, dataframe,
channel: str = 'CAM_FRONT',
freq: float = 10,
imsize: Tuple[float, float] = (960, 540),
out_path: str = None) -> None:
"""
Renders a full scene for a particular camera channel.
:param scene_token: Unique identifier of scene to render.
:param channel: Channel to render.
:param freq: Display frequency (Hz).
:param imsize: Size of image to render. The larger the slower this will run.
:param out_path: Optional path to write a video file of the rendered frames.
"""
valid_channels = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
assert imsize[0] / imsize[1] == 16 / 9, "Aspect ratio should be 16/9."
assert channel in valid_channels, 'Input channel {} not valid.'.format(channel)
if out_path is not None:
assert osp.splitext(out_path)[-1] == '.avi'
# Get records from DB
scene_rec = nusc.get('scene', scene_token)
sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
sd_rec = nusc.get('sample_data', sample_rec['data'][channel])
# Open CV init
name = '{}: {} (Space to pause, ESC to exit)'.format(scene_rec['name'], channel)
cv2.namedWindow(name)
cv2.moveWindow(name, 0, 0)
if out_path is not None:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(out_path, fourcc, freq, imsize)
else:
out = None
    # parameters for cv2
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10,500)
fontScale = 1
fontColor = (255,255,255)
color = (255,0,0)
lineType = 2
pas = (0,50)
# 900* 1600
    # parameters for displaying info
i = 0
taille = dataframe.shape[0]
scene_token = nusc.field2token('scene', 'name', dataframe.at[0,'scene'])[0]
scene = nusc.get('scene',scene_token)
sample = nusc.get('sample',scene['first_sample_token'])
df_curr = dataframe[dataframe['timestamp'] == sample['timestamp']]
df_curr = df_curr.sort_values(by='distance').reset_index(drop=True)
print(df_curr)
has_more_frames = True
angle = df_curr.iat[0,8]
xmin = 10
xmax = - 10
colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155))
# -30.671 39.22340
borne_a = 600
borne_b = 1000
while has_more_frames:
ann = df_curr[df_curr["inst_token"]=="98300b9c4acb4da9a7aecd0084650265"]
ann_tok = ann['ann_token']
# selected_anntokens=[ann_tok.iat[0]]
# Get data from DB
impath, boxes, camera_intrinsic = nusc.get_sample_data(sd_rec['token'],
box_vis_level=BoxVisibility.ANY)
# Load and render
if not osp.exists(impath):
raise Exception('Error: Missing image %s' % impath)
im = cv2.imread(impath)
dmin = 999
minbox = None
for box in boxes:
corners = view_points(box.corners(), camera_intrinsic, normalize=True)[:2, :]
if (box.center[2] < dmin and corners.T[4][0] < borne_b-angle and corners.T[6][0] > borne_a-angle
and "vehicle" in box.name):
dmin = box.center[2]
minbox = box
if box.center[0] < xmin:
xmin = box.center[0]
if box.center[0] > xmax:
xmax = box.center[0]
#print(box.center,ann["distance"].iat[0])
if dmin != 999:
c = get_color(minbox.name)
#minbox.render_cv2(im, view=camera_intrinsic, normalize=True, colors=(c, c, c))
corners = view_points(minbox.corners(), camera_intrinsic, normalize=True)[:2, :]
#draw_rect(im,corners.T[:4], colors[0][::-1])
draw_rect(im,corners.T[4:], colors[1][::-1])
#print(corners.T[4:])
        # Display information
if sample['token'] != scene['last_sample_token']:
if not df_curr.empty:
#print("passe")
if dmin != 999:
cv2.line(im, (int((corners.T[4][0]+corners.T[6][0])/2), 400), (int((corners.T[4][0]+corners.T[6][0])/2), 600), (255, 255, 0), thickness=2)
cv2.putText(im, 'Center:'+ str(round(minbox.center[0],3))+"\n "+str(round(minbox.center[2],2)),
(int(800+minbox.center[0]*10),250),
font,
fontScale,
(255, 0, 255),
lineType)
cv2.line(im, (int(borne_b-angle), 400), (int(borne_b-angle), 600), (255, 0, 0), thickness=2)
cv2.line(im, (int(borne_a-angle), 400), (int(borne_a-angle), 600), (255, 0, 0), thickness=2)
affichage(im,df_curr)
else:
print(sample['timestamp'])
if i%6 == 0 and i != 0:
sample = nusc.get('sample',sample['next'])
df_curr = dataframe[dataframe['timestamp'] == sample['timestamp']]
df_curr = df_curr.sort_values(by='distance').reset_index(drop=True)
#print("changement")
if not df_curr.empty:
angle = df_curr.iat[0,8]
#angle = 0
else:
print("fin des données ",i)
# Render
im = cv2.resize(im, imsize)
cv2.imshow(name, im)
if out_path is not None:
out.write(im)
key = cv2.waitKey(10) # Images stored at approx 10 Hz, so wait 10 ms.
if key == 32: # If space is pressed, pause.
key = cv2.waitKey()
if key == 27: # if ESC is pressed, exit
cv2.destroyAllWindows()
break
if not sd_rec['next'] == "":
sd_rec = nusc.get('sample_data', sd_rec['next'])
else:
has_more_frames = False
i += 1
print("nombre de frame: ",i)
print(xmin,xmax)
cv2.destroyAllWindows()
if out_path is not None:
out.release()
# July 1
# Function that rolls out a scene based on the predictions made,
# starting point = initial position, then recomputed at each loop iteration from the models' outputs
def predict_scene_v1(scene_name):
my_scene_token = nusc.field2token('scene', 'name', scene_name)[0]
scene = nusc.get('scene',my_scene_token)
#nusc.render_scene_channel(my_scene_token, 'CAM_FRONT')
df = build_dataframe_for_one_scene(scene,False)
df_ego = df[df['inst_token'] == "vehicle_info"]
    # Initialize the parameters
speed = df_ego.iloc[0]['ego_speed']
A = df_ego.iloc[0]['ego_pos'][:2]
B = df_ego.iloc[1]['ego_pos'][:2]
AB = [round(B[0] - A[0],3),round(B[1] - A[1],3)]
ABn = round(AB[0]/np.sqrt((AB[0]*AB[0] + AB[1]*AB[1])),3),round(AB[1]/np.sqrt((AB[0]*AB[0] + AB[1]*AB[1])),3)
#print(A,B,AB,ABn)
log = []
features = ["distance","ego_speed","throttle","brake"]
sample = nusc.get('sample',scene['first_sample_token'])
last = scene['last_sample_token']
i = 0
throttle = 0
brake = 0
print("Position Predite, Position Reel, Distance, vitesse, accélération, freinage")
    # Loop
while i != 30 and sample['token'] != last:
speed = round(speed,3)
distance = compute_distance_cheat(A,ABn,df[df['timestamp']==sample['timestamp']])
data = [[distance,speed,throttle,brake]]
data = [[distance,speed]]
throttle = model_t.predict(data)
brake = model_b.predict(data)
if throttle[0] < 0:
throttle[0] = 0.0
if brake[0] < 0:
brake[0] = 0.0
print(A,df_ego.iloc[i]['ego_pos'][:2],distance,speed,throttle,brake)
#throttle = 0
#brake = 0
speed = speed + throttle[0]/10 - brake[0] - 0.5
if speed < 0:
speed = 0
        # Compute the new point
A = B
deplacement = [e * speed/3.6*0.5 for e in ABn]
#B = list(map(operator.add, B,deplacement))
i += 1
B = df_ego.iloc[i]['ego_pos'][:2]
B = [round(b,3) for b in B]
sample = nusc.get('sample',sample['next'])
AB = [round(B[0] - A[0],3),round(B[1] - A[1],3)]
ABn = round(AB[0]/np.sqrt((AB[0]*AB[0] + AB[1]*AB[1])),3),round(AB[1]/np.sqrt((AB[0]*AB[0] + AB[1]*AB[1])),3)
log += [ABn]
return log
# First version, does not work
def compute_distance(pos,ABn,dataframe):
#dist = np.linalg.norm(np.array(ego['translation']) - np.array(curr_ann['translation']))
dataframe = dataframe.drop(columns=['distance'])
taille = dataframe.shape[0]
dmin = 99
mini = 0
for i in range(taille):
row = dataframe.iloc[i]
if row["inst_token"] != "vehicle_info":
distance_ego = np.linalg.norm(np.array(pos) - np.array(row['object_pos'][:2]))
distance_vecteur_vitesse = np.absolute(p[1] - a * p[0] - c)/ np.sqrt(a*a + 1)
if distance_ego < dmin:
mind = distance
mini = i
print("Distance:",mind," ",dataframe.iloc[mini]['inst_token']," ",dataframe.iloc[mini]['object_pos'])
return mind
#ego_pos = [round(e,3) for e in ego['translation']]
#object_pos = [round(e,3) for e in curr_ann['translation']]
#
scene_name = 'scene-0006'
my_scene_token = nusc.field2token('scene', 'name', scene_name)[0]
scene = nusc.get('scene',my_scene_token)
df_scene = build_dataframe_for_one_scene(scene,False)
df = df_scene
#display(df_scene)
liste_temps = sorted(set(df_scene['timestamp'].to_list()))
#liste_temps = np.sort(np.unique(df_scene['timestamp'].to_numpy()))
print(liste_temps)
list_pos = df[df['timestamp']==1531884156948944]['object_pos'].to_list()
print(list_pos)
a = np.transpose(np.asarray(list_pos))
df[(df['timestamp']==1531884156948944) & (df['inst_token']!='vehicle_info')]
avg_line_length: 40.775943 | max_line_length: 246 | alphanum_fraction: 0.613743

hexsha: 1103f18269dc6a0abe0f072fc23da990635fe60e | size: 191 | ext: py | lang: Python
max_stars: tests/cpydiff/modules_array_subscrstep.py | learnforpractice/micropython-cpp @ 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | licenses ["MIT"] | count 692 | 2016-12-19T23:25:35.000Z to 2022-03-31T14:20:48.000Z
max_issues: tests/cpydiff/modules_array_subscrstep.py | learnforpractice/micropython-cpp @ 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | licenses ["MIT"] | count 509 | 2017-03-28T19:37:18.000Z to 2022-03-31T20:31:43.000Z
max_forks: tests/cpydiff/modules_array_subscrstep.py | learnforpractice/micropython-cpp @ 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | licenses ["MIT"] | count 228 | 2016-12-19T05:03:30.000Z to 2022-03-22T18:13:00.000Z
"""
categories: Modules,array
description: Subscript with step != 1 is not yet implemented
cause: Unknown
workaround: Unknown
"""
import array
a = array.array('b', (1, 2, 3))
print(a[3:2:2])
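# For reference (not part of the original test file): CPython prints an empty
# array here ("array('b')"), while MicroPython is expected to raise an error
# because slicing an array with a step other than 1 is not implemented.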
avg_line_length: 19.1 | max_line_length: 60 | alphanum_fraction: 0.701571

hexsha: d3362f2e330b64a3e5c0f80a67a759ce4837fd86 | size: 1,365 | ext: py | lang: Python
max_stars: save_images.py | clovadev/qualitative-evaluator @ 83fc54bfa7b9599135235d9317741362cb937feb | licenses ["MIT"] | count 2 | 2021-02-22T10:55:29.000Z to 2021-02-22T10:55:30.000Z
max_issues: save_images.py | clovadev/visual-comparator @ 83fc54bfa7b9599135235d9317741362cb937feb | licenses ["MIT"] | count null | dates null
max_forks: save_images.py | clovadev/visual-comparator @ 83fc54bfa7b9599135235d9317741362cb937feb | licenses ["MIT"] | count null | dates null
import os
from PIL import Image
import tqdm
import utils
# Set up directories
config = utils.load_config()
os.makedirs(config['cluster_dir'], exist_ok=True)
# Get image paths
paths_input_image = utils.get_image_path(config['root'], 'input_image')
paths_groundtruth = utils.get_image_path(config['root'], 'groundtruth')
paths_conventional = utils.get_image_path(config['root'], 'conventional')
paths_proposed = utils.get_image_path(config['root'], 'proposed')
assert len(paths_input_image) == len(paths_groundtruth) == len(paths_conventional) == len(paths_proposed)
# Configure matplotlib rcParams
utils.set_matplotlib_rcparams()
# Create and save plots
zip_iter = zip(paths_input_image, paths_groundtruth, paths_conventional, paths_proposed)
for paths in tqdm.tqdm(zip_iter, total=len(paths_input_image)):
input_image = Image.open(paths[0])
groundtruth = Image.open(paths[1])
conventional = Image.open(paths[2])
proposed = Image.open(paths[3])
input_image = input_image.resize(groundtruth.size)
assert input_image.size == groundtruth.size == conventional.size == proposed.size
image_name = os.path.split(paths[0])[-1]
titles = ['Input image ({})'.format(image_name), 'Groundtruth', 'Conventional', 'Proposed']
images = [input_image, groundtruth, conventional, proposed]
utils.make_plot(titles, images, os.path.join(config['cluster_dir'], image_name))
avg_line_length: 40.147059 | max_line_length: 105 | alphanum_fraction: 0.752381

hexsha: eec50962d6f1ebb12b0b06e319ec0de50df29635 | size: 2,635 | ext: py | lang: Python
max_stars: budget-rnn/src/layers/output_layers.py | tejaskannan/ml-models @ ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | licenses ["Apache-2.0"] | count 1 | 2021-06-28T15:40:41.000Z to 2021-06-28T15:40:41.000Z
max_issues: budget-rnn/src/layers/output_layers.py | tejaskannan/ml-models @ ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | licenses ["Apache-2.0"] | count 5 | 2021-03-04T19:42:15.000Z to 2022-02-10T05:46:15.000Z
max_forks: budget-rnn/src/layers/output_layers.py | tejaskannan/budget-rnn @ ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | licenses ["Apache-2.0"] | count null | dates null
import tensorflow as tf
from collections import namedtuple
from enum import Enum, auto
from typing import List
from utils.constants import ONE_HALF, SMALL_NUMBER
# Tuples to store output types
ClassificationOutput = namedtuple('ClassificationOutput', ['logits', 'prediction_probs', 'predictions', 'accuracy'])
RegressionOutput = namedtuple('RegressionOutput', ['predictions'])
# Enum to denote output layer type
class OutputType(Enum):
BINARY_CLASSIFICATION = auto()
MULTI_CLASSIFICATION = auto()
REGRESSION = auto()
def is_classification(output_type: OutputType) -> bool:
return output_type in (OutputType.BINARY_CLASSIFICATION, OutputType.MULTI_CLASSIFICATION)
def compute_binary_classification_output(model_output: tf.Tensor, labels: tf.Tensor) -> ClassificationOutput:
"""
Uses the model output and expected output to compute the classification output values for the
given binary classification task.
Args:
model_output: A [B, 1] tensor containing the model outputs (logits) for each batch sample
labels: A [B, 1] float tensor with the correct labels
"""
logits = model_output
predicted_probs = tf.math.sigmoid(logits)
predictions = tf.cast(predicted_probs > ONE_HALF, dtype=tf.float32)
# Compute the batch-wise accuracy
accuracy = tf.reduce_mean(1.0 - tf.abs(predictions - labels))
return ClassificationOutput(logits=logits,
prediction_probs=predicted_probs,
predictions=predictions,
accuracy=accuracy)
def compute_multi_classification_output(model_output: tf.Tensor, labels: tf.Tensor) -> ClassificationOutput:
"""
Uses the model output to compute the multi-class classification output for a given task.
Args:
model_output: A [B, K] or [B, T, K] tensor containing the logits for each batch sample (B) and output class (K)
labels: A [B, 1] or [B, T, 1] int tensor with the expected labels.
"""
logits = model_output # [B, K] / [B, T, K]
predicted_probs = tf.nn.softmax(logits, axis=-1) # [B, K] / [B, T, K]
predictions = tf.math.argmax(predicted_probs, axis=-1, output_type=labels.dtype) # [B] / [B, T]
# Compute the batch-wise accuracy
correct = tf.cast(tf.equal(predictions, tf.squeeze(labels, axis=-1)), dtype=tf.float32)
accuracy = tf.reduce_mean(correct)
return ClassificationOutput(logits=logits,
prediction_probs=predicted_probs,
predictions=predictions,
accuracy=accuracy)
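# Illustrative usage sketch (not part of the original module), assuming eager
# execution under TensorFlow 2.x; the logits and labels below are made up.
if __name__ == '__main__':
    demo_logits = tf.constant([[2.0, 0.5, -1.0],
                               [0.1, 0.2, 3.0]])           # [B=2, K=3]
    demo_labels = tf.constant([[0], [2]], dtype=tf.int64)  # [B=2, 1]
    demo_out = compute_multi_classification_output(demo_logits, demo_labels)
    print(demo_out.predictions.numpy())  # -> [0 2]
    print(demo_out.accuracy.numpy())     # -> 1.0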
avg_line_length: 39.328358 | max_line_length: 119 | alphanum_fraction: 0.679696

hexsha: f4ed5456b32e588679506c33c399e106f7179181 | size: 5,647 | ext: py | lang: Python
max_stars: model/quantized_cifar10_resnet.py | cornell-zhang/dnn-gating @ 31666fadf35789b433c79eec8669a3a2df818bd4 | licenses ["BSD-3-Clause"] | count 58 | 2020-03-03T23:51:24.000Z to 2022-02-22T14:11:17.000Z
max_issues: model/quantized_cifar10_resnet.py | cornell-zhang/dnn-gating @ 31666fadf35789b433c79eec8669a3a2df818bd4 | licenses ["BSD-3-Clause"] | count 5 | 2020-10-29T12:59:31.000Z to 2022-03-26T03:56:50.000Z
max_forks: model/quantized_cifar10_resnet.py | cornell-zhang/dnn-gating @ 31666fadf35789b433c79eec8669a3a2df818bd4 | licenses ["BSD-3-Clause"] | count 11 | 2020-04-20T09:17:19.000Z to 2022-02-21T19:05:02.000Z
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file are hugely influenced by [2],
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-paste from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils.pg_utils as q
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A', **kwargs):
super(BasicBlock, self).__init__()
self.conv1 = q.QuantizedConv2d(in_planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False,
wbits=kwargs['wbits'], abits=kwargs['abits'])
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = q.QuantizedConv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False,
wbits=kwargs['wbits'], abits=kwargs['abits'])
self.bn2 = nn.BatchNorm2d(planes)
self.relu = q.PactReLU() if kwargs['pact'] else nn.ReLU()
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
q.QuantizedConv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False,
wbits=kwargs['wbits'], abits=kwargs['abits']),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, **kwargs):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1, **kwargs)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2, **kwargs)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2, **kwargs)
self.linear = nn.Linear(64, num_classes)
self.relu = q.PactReLU() if kwargs['pact'] else nn.ReLU()
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride, **kwargs):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, **kwargs))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20(num_classes=10, **kwargs):
return ResNet(BasicBlock, [3, 3, 3], num_classes=num_classes, **kwargs)
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
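# Illustrative usage sketch (assumption, not from the original repo): BasicBlock
# and ResNet read 'wbits', 'abits' and 'pact' out of **kwargs, so a quantized
# model is normally constructed with those keys supplied, e.g.:
#
#   model = resnet20(num_classes=10, wbits=4, abits=4, pact=False)
#   logits = model(torch.randn(1, 3, 32, 32))  # CIFAR-10 sized input -> [1, 10]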
'''
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print()
'''
avg_line_length: 35.074534 | max_line_length: 120 | alphanum_fraction: 0.602267

hexsha: df93ca6a6ab00cec6d9e2e02bb7354a52f11fb77 | size: 4,348 | ext: py | lang: Python
max_stars: homeassistant/components/cloud/account_link.py | mikan-megane/core @ 837220cce40890e296920d33a623adbc11bd15a6 | licenses ["Apache-2.0"] | count 11 | 2018-02-16T15:35:47.000Z to 2020-01-14T15:20:00.000Z
max_issues: homeassistant/components/cloud/account_link.py | mikan-megane/core @ 837220cce40890e296920d33a623adbc11bd15a6 | licenses ["Apache-2.0"] | count 79 | 2020-07-23T07:13:37.000Z to 2022-03-22T06:02:37.000Z
max_forks: homeassistant/components/cloud/account_link.py | mikan-megane/core @ 837220cce40890e296920d33a623adbc11bd15a6 | licenses ["Apache-2.0"] | count 6 | 2018-02-04T03:48:55.000Z to 2022-01-24T20:37:04.000Z
"""Account linking via the cloud."""
import asyncio
import logging
from typing import Any
import aiohttp
from hass_nabucasa import account_link
from homeassistant.const import MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_entry_oauth2_flow, event
from .const import DOMAIN
DATA_SERVICES = "cloud_account_link_services"
CACHE_TIMEOUT = 3600
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup(hass: HomeAssistant):
"""Set up cloud account link."""
config_entry_oauth2_flow.async_add_implementation_provider(
hass, DOMAIN, async_provide_implementation
)
async def async_provide_implementation(hass: HomeAssistant, domain: str):
"""Provide an implementation for a domain."""
services = await _get_services(hass)
for service in services:
if service["service"] == domain and _is_older(service["min_version"]):
return CloudOAuth2Implementation(hass, domain)
return
@callback
def _is_older(version: str) -> bool:
"""Test if a version is older than the current HA version."""
version_parts = version.split(".")
if len(version_parts) != 3:
return False
try:
version_parts = [int(val) for val in version_parts]
except ValueError:
return False
patch_number_str = ""
for char in PATCH_VERSION:
if char.isnumeric():
patch_number_str += char
else:
break
try:
patch_number = int(patch_number_str)
except ValueError:
patch_number = 0
cur_version_parts = [MAJOR_VERSION, MINOR_VERSION, patch_number]
return version_parts <= cur_version_parts
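# Illustrative note (not part of the original module): with a hypothetical
# current version of 2021.12.0b3, PATCH_VERSION parses to 0, so
# _is_older("2021.6.0") would return True and _is_older("2022.1.0") False;
# malformed strings such as "2021.6" return False.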
async def _get_services(hass):
"""Get the available services."""
services = hass.data.get(DATA_SERVICES)
if services is not None:
return services
try:
services = await account_link.async_fetch_available_services(hass.data[DOMAIN])
except (aiohttp.ClientError, asyncio.TimeoutError):
return []
hass.data[DATA_SERVICES] = services
@callback
def clear_services(_now):
"""Clear services cache."""
hass.data.pop(DATA_SERVICES, None)
event.async_call_later(hass, CACHE_TIMEOUT, clear_services)
return services
class CloudOAuth2Implementation(config_entry_oauth2_flow.AbstractOAuth2Implementation):
"""Cloud implementation of the OAuth2 flow."""
def __init__(self, hass: HomeAssistant, service: str) -> None:
"""Initialize cloud OAuth2 implementation."""
self.hass = hass
self.service = service
@property
def name(self) -> str:
"""Name of the implementation."""
return "Home Assistant Cloud"
@property
def domain(self) -> str:
"""Domain that is providing the implementation."""
return DOMAIN
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize."""
helper = account_link.AuthorizeAccountHelper(
self.hass.data[DOMAIN], self.service
)
authorize_url = await helper.async_get_authorize_url()
async def await_tokens():
"""Wait for tokens and pass them on when received."""
try:
tokens = await helper.async_get_tokens()
except asyncio.TimeoutError:
_LOGGER.info("Timeout fetching tokens for flow %s", flow_id)
except account_link.AccountLinkException as err:
_LOGGER.info(
"Failed to fetch tokens for flow %s: %s", flow_id, err.code
)
else:
await self.hass.config_entries.flow.async_configure(
flow_id=flow_id, user_input=tokens
)
self.hass.async_create_task(await_tokens())
return authorize_url
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Resolve external data to tokens."""
# We already passed in tokens
return external_data
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh a token."""
return await account_link.async_fetch_access_token(
self.hass.data[DOMAIN], self.service, token["refresh_token"]
)
avg_line_length: 29.181208 | max_line_length: 87 | alphanum_fraction: 0.665363

hexsha: 8d28816db7b20937f37f33a546fb79473a6d5c80 | size: 2,206 | ext: py | lang: Python
max_stars: src/collectors/ping/ping.py | hermdog/Diamond @ 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | licenses ["MIT"] | count 1,795 | 2015-01-05T11:14:55.000Z to 2022-03-25T12:07:15.000Z
max_issues: src/collectors/ping/ping.py | hermdog/Diamond @ 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | licenses ["MIT"] | count 671 | 2015-01-02T05:57:27.000Z to 2022-03-29T22:39:05.000Z
max_forks: src/collectors/ping/ping.py | hermdog/Diamond @ 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | licenses ["MIT"] | count 793 | 2015-01-03T01:39:02.000Z to 2022-02-18T05:12:27.000Z
# coding=utf-8
"""
Collect ICMP round trip times
Only valid for IPv4 hosts currently
#### Dependencies
* ping
#### Configuration
Configuration is done by:
Create a file named: PingCollector.conf in the collectors_config_path
* enabled = true
* interval = 60
* target_1 = example.org
* target_fw = 192.168.0.1
* target_localhost = localhost
Test your configuration using the following command:
diamond-setup --print -C PingCollector
You should get a response back that indicates 'enabled': True and see entries
for your targets in pairs like:
'target_1': 'example.org'
The graphite nodes pushed are derived from the pinged hostnames by replacing all
dots with underscores, i.e. 'www.example.org' becomes 'www_example_org'.
"""
import diamond.collector
class PingCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(PingCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the ping binary',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(PingCollector, self).get_default_config()
config.update({
'path': 'ping',
'bin': '/bin/ping',
})
return config
def collect(self):
for key in self.config.keys():
if key[:7] == "target_":
host = self.config[key]
metric_name = host.replace('.', '_')
ping = self.run_command(['-nq', '-c 1', host])
ping = ping[0].strip().split("\n")[-1]
# Linux
if ping.startswith('rtt'):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# OS X
elif ping.startswith('round-trip '):
ping = ping.split()[3].split('/')[0]
metric_value = float(ping)
# Unknown
else:
metric_value = 10000
self.publish(metric_name, metric_value, precision=3)
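# Illustrative note (not part of the original collector): the parsing above keys
# off the summary line printed by `ping -nq -c 1 <host>`, e.g. on Linux
#   rtt min/avg/max/mdev = 0.045/0.045/0.045/0.000 ms
# and on OS X
#   round-trip min/avg/max/stddev = 0.053/0.053/0.053/0.000 ms
# In both cases the value published is the first (min) field, in milliseconds,
# and 10000 is published when the line cannot be parsed.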
avg_line_length: 26.902439 | max_line_length: 80 | alphanum_fraction: 0.575703

hexsha: bcaebe0db87db248a09700ffcb4ef23ad5effdfc | size: 2,108 | ext: py | lang: Python
max_stars: tests/st/ops/ascend/vector/test_atan2_001.py | KnowingNothing/akg-test @ 114d8626b824b9a31af50a482afc07ab7121862b | licenses ["Apache-2.0"] | count null | dates null
max_issues: tests/st/ops/ascend/vector/test_atan2_001.py | KnowingNothing/akg-test @ 114d8626b824b9a31af50a482afc07ab7121862b | licenses ["Apache-2.0"] | count null | dates null
max_forks: tests/st/ops/ascend/vector/test_atan2_001.py | KnowingNothing/akg-test @ 114d8626b824b9a31af50a482afc07ab7121862b | licenses ["Apache-2.0"] | count null | dates null
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""atan2 test case"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.atan2_run import atan2_run
class TestAtan2(TestBase):
def setup(self):
"""setup case parameters for test"""
case_name = "test_akg_atan2_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("=================%s Setup case=================", self.casename)
self.testarg_mini = [
# testflag, opfuncname, testRunArgs, dimArgs
("atan2_f16_01", atan2_run, ((8, 16), "float16", (8, 16), "float16")),
("atan2_f32_02", atan2_run, ((8, 16), "float32", (8, 16), "float32")),
]
self.testarg_cloud = [
# testflag, opfuncname, testRunArgs, dimArgs
("atan2_f16_03", atan2_run, ((32, 256, 16), "float16", (32, 256, 16), "float16")),
("atan2_f32_04", atan2_run, ((32, 256, 16), "float32", (32, 256, 16), "float32")),
]
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mini_run(self):
"""run case for mini"""
self.common_run(self.testarg_mini)
def test_cloud_run(self):
"""run case for cloud"""
self.common_run(self.testarg_cloud)
def teardown(self):
"""clean environment"""
self._log.info("=============%s Teardown===========", self.casename)
avg_line_length: 36.344828 | max_line_length: 94 | alphanum_fraction: 0.638994

hexsha: 12dba3d38b62aa957463988bff1d65d17068d6f7 | size: 19,177 | ext: py | lang: Python
max_stars: tensorflow_probability/python/distributions/power_spherical.py | jakee417/probability-1 @ ae7117f37ac441bc7a888167ea23e5e620c5bcde | licenses ["Apache-2.0"] | count 3,670 | 2018-02-14T03:29:40.000Z to 2022-03-30T01:19:52.000Z
max_issues: tensorflow_probability/python/distributions/power_spherical.py | jakee417/probability-1 @ ae7117f37ac441bc7a888167ea23e5e620c5bcde | licenses ["Apache-2.0"] | count 1,395 | 2018-02-24T02:28:49.000Z to 2022-03-31T16:12:06.000Z
max_forks: tensorflow_probability/python/distributions/power_spherical.py | jakee417/probability-1 @ ae7117f37ac441bc7a888167ea23e5e620c5bcde | licenses ["Apache-2.0"] | count 1,135 | 2018-02-14T01:51:10.000Z to 2022-03-28T02:24:11.000Z
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Power Spherical distribution over vectors on the unit hypersphere."""
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import invert as invert_bijector
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.bijectors import square as square_bijector
from tensorflow_probability.python.distributions import beta as beta_lib
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import spherical_uniform
from tensorflow_probability.python.distributions import von_mises_fisher
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.random import random_ops
__all__ = ['PowerSpherical']
class PowerSpherical(distribution.AutoCompositeTensorDistribution):
r"""The Power Spherical distribution over unit vectors on `S^{n-1}`.
The Power Spherical distribution [1] is a distribution over vectors
on the unit hypersphere `S^{n-1}` embedded in `n` dimensions (`R^n`).
It serves as an alternative to the von Mises-Fisher distribution with a
simpler (faster) `log_prob` calculation, as well as a reparameterizable
  sampler. In contrast to the von Mises-Fisher distribution, which has
  non-zero density everywhere, the Power Spherical distribution has
  `-mean_direction` as a point of zero density (and hence a neighborhood
  around it with arbitrarily small density).
NOTE: `mean_direction` is not in general the mean of the distribution. For
spherical distributions, the mean is generally not in the support of the
distribution.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, kappa) = C(kappa) (1 + mu^T x) ** kappa
where,
C(kappa) = 2**(a + b) pi**b Gamma(a) / Gamma(a + b)
a = (n - 1) / 2. + kappa
b = (n - 1) / 2.
```
where:
* `mean_direction = mu`; a unit vector in `R^n`,
* `concentration = kappa`; scalar real >= 0, concentration of samples around
`mean_direction`, where 0 pertains to the uniform distribution on the
hypersphere, and \inf indicates a delta function at `mean_direction`.
#### Examples
A single instance of a PowerSpherical distribution is defined by a mean
direction unit vector.
Extra leading dimensions, if provided, allow for batches.
```python
tfd = tfp.distributions
# Initialize a single 3-dimension PowerSpherical distribution.
mu = [0., 1, 0]
conc = 1.
ps = tfd.PowerSpherical(mean_direction=mu, concentration=conc)
# Evaluate this on an observation in S^2 (in R^3), returning a scalar.
ps.prob([1., 0, 0])
  # Initialize a batch of two 3-variate PowerSpherical distributions.
mu = [[0., 1, 0],
[1., 0, 0]]
conc = [1., 2]
ps = tfd.PowerSpherical(mean_direction=mu, concentration=conc)
# Evaluate this on two observations, each in S^2, returning a length two
# tensor.
x = [[0., 0, 1],
[0., 1, 0]]
ps.prob(x)
```
#### References
[1] Nicola de Cao, Wilker Aziz. The Power Spherical distribution.
https://arxiv.org/abs/2006.04437.
"""
def __init__(self,
mean_direction,
concentration,
validate_args=False,
allow_nan_stats=True,
name='PowerSpherical'):
"""Creates a new `PowerSpherical` instance.
Args:
mean_direction: Floating-point `Tensor` with shape [B1, ... Bn, N].
A unit vector indicating the mode of the distribution, or the
unit-normalized direction of the mean.
concentration: Floating-point `Tensor` having batch shape [B1, ... Bn]
broadcastable with `mean_direction`. The level of concentration of
samples around the `mean_direction`. `concentration=0` indicates a
uniform distribution over the unit hypersphere, and `concentration=+inf`
indicates a `Deterministic` distribution (delta function) at
`mean_direction`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: For known-bad arguments, i.e. unsupported event dimension.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([mean_direction, concentration],
tf.float32)
self._mean_direction = tensor_util.convert_nonref_to_tensor(
mean_direction, name='mean_direction', dtype=dtype)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, name='concentration', dtype=dtype)
super(PowerSpherical, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
mean_direction=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=parameter_properties
.BIJECTOR_NOT_IMPLEMENTED),
concentration=parameter_properties.ParameterProperties(
shape_fn=lambda sample_shape: sample_shape[:-1],
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
@property
def mean_direction(self):
"""Mean direction parameter."""
return self._mean_direction
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
def _event_shape_tensor(self, mean_direction=None):
return ps.shape(self.mean_direction if mean_direction is None
else mean_direction)[-1:]
def _event_shape(self):
return tensorshape_util.with_rank(self.mean_direction.shape[-1:], rank=1)
def _log_prob(self, x):
concentration = tf.convert_to_tensor(self.concentration)
return (self._log_unnormalized_prob(x, concentration=concentration) -
self._log_normalization(concentration=concentration))
def _log_unnormalized_prob(self, samples, concentration=None):
if concentration is None:
concentration = tf.convert_to_tensor(self.concentration)
inner_product = tf.reduce_sum(samples * self.mean_direction, axis=-1)
inner_product = tf.clip_by_value(inner_product, -1., 1.)
return tf.math.xlog1py(concentration, inner_product)
def _log_normalization(self, concentration=None, mean_direction=None):
"""Computes the log-normalizer of the distribution."""
if concentration is None:
concentration = tf.convert_to_tensor(self.concentration)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[-1], self.dtype)
concentration1 = concentration + (event_size - 1.) / 2.
concentration0 = (event_size - 1.) / 2.
return ((concentration1 + concentration0) * np.log(2.) +
concentration0 * np.log(np.pi) +
tfp_math.log_gamma_difference(concentration0, concentration1))
def _sample_control_dependencies(self, samples):
"""Check samples for proper shape and whether samples are unit vectors."""
inner_sample_dim = samples.shape[-1]
event_size = self.event_shape[-1]
shape_msg = ('Samples must have innermost dimension matching that of '
'`self.mean_direction`.')
if event_size is not None and inner_sample_dim is not None:
if event_size != inner_sample_dim:
raise ValueError(shape_msg)
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_near(
tf.cast(1., dtype=self.dtype),
tf.linalg.norm(samples, axis=-1),
message='Samples must be unit length.'))
assertions.append(assert_util.assert_equal(
tf.shape(samples)[-1:],
self.event_shape_tensor(),
message=shape_msg))
return assertions
def _mean(self):
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[0], dtype=self.dtype)
return (concentration / (
event_size - 1. + concentration))[..., tf.newaxis] * mean_direction
def _covariance(self):
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[0], dtype=self.dtype)
covariance = -concentration[..., tf.newaxis, tf.newaxis] * tf.linalg.matmul(
mean_direction[..., tf.newaxis],
mean_direction[..., tf.newaxis, :])
covariance = tf.linalg.set_diag(
covariance, tf.linalg.diag_part(covariance) + (
concentration + event_size - 1.)[..., tf.newaxis])
covariance = ((2 * concentration + event_size - 1.)/ (
tf.math.square(concentration + event_size - 1.) * (
concentration + event_size)))[
..., tf.newaxis, tf.newaxis] * covariance
return covariance
def _sample_n(self, n, seed=None):
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
event_size_int = self._event_shape_tensor(
mean_direction=mean_direction)[0]
event_size = tf.cast(event_size_int, dtype=self.dtype)
beta_seed, uniform_seed = samplers.split_seed(seed, salt='power_spherical')
broadcasted_concentration = tf.broadcast_to(
concentration, self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration))
beta = beta_lib.Beta(
(event_size - 1.) / 2. + broadcasted_concentration,
(event_size - 1.) / 2.)
beta_samples = beta.sample(n, seed=beta_seed)
u_shape = ps.concat([[n], self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration)], axis=0)
spherical_samples = random_ops.spherical_uniform(
shape=u_shape,
dimension=event_size_int - 1,
dtype=self.dtype,
seed=uniform_seed)
t = 2. * beta_samples - 1.
y = tf.concat([
t[..., tf.newaxis],
tf.math.sqrt(1. - tf.math.square(t))[
..., tf.newaxis] * spherical_samples], axis=-1)
u = tf.concat(
[(1. - mean_direction[..., 0])[..., tf.newaxis],
-mean_direction[..., 1:]], axis=-1)
# Much like `VonMisesFisher`, we use `l2_normalize` which does
# nothing if the zero vector is passed in, and thus the householder
# reflection will do nothing.
# This is consistent with sampling
# with `mu = [1, 0, 0, ..., 0]` since samples will be of the
# form: [w, sqrt(1 - w**2) * u] = w * mu + sqrt(1 - w**2) * v,
# where:
# * `u` is a unit vector sampled from the unit hypersphere.
# * `v` is `[0, u]`.
# This form is the same as sampling from the tangent-normal decomposition.
u = tf.math.l2_normalize(u, axis=-1)
return tf.math.l2_normalize(
y - 2. * tf.math.reduce_sum(y * u, axis=-1, keepdims=True) * u, axis=-1)
def _entropy(self, concentration=None, mean_direction=None):
concentration = (
tf.convert_to_tensor(self.concentration) if
concentration is None else concentration)
mean_direction = (
tf.convert_to_tensor(self.mean_direction) if
mean_direction is None else mean_direction)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[-1], self.dtype)
concentration1 = concentration + (event_size - 1.) / 2.
concentration0 = (event_size - 1.) / 2.
entropy = (self._log_normalization(
concentration=concentration, mean_direction=mean_direction) -
concentration * (
np.log(2.) + tf.math.digamma(concentration1) -
tf.math.digamma(concentration1 + concentration0)))
return tf.broadcast_to(
entropy, self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration))
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector.
return chain_bijector.Chain([
invert_bijector.Invert(
square_bijector.Square(validate_args=self.validate_args),
validate_args=self.validate_args),
softmax_centered_bijector.SoftmaxCentered(
validate_args=self.validate_args)
], validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
assertions = []
if is_init != tensor_util.is_ref(self._mean_direction):
assertions.append(
assert_util.assert_greater(
tf.shape(mean_direction)[-1],
1,
message='`mean_direction` must be a vector of at least size 2.'))
assertions.append(
assert_util.assert_near(
tf.cast(1., self.dtype),
tf.linalg.norm(mean_direction, axis=-1),
message='`mean_direction` must be unit-length'))
if is_init != tensor_util.is_ref(self._concentration):
assertions.append(
assert_util.assert_non_negative(
concentration, message='`concentration` must be non-negative'))
return assertions
@kullback_leibler.RegisterKL(PowerSpherical, spherical_uniform.SphericalUniform)
def _kl_power_uniform_spherical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b).
Args:
a: instance of a PowerSpherical distribution object.
b: instance of a SphericalUniform distribution object.
name: (optional) Name to use for created operations.
default is "kl_power_uniform_spherical".
Returns:
Batchwise KL(a || b)
Raises:
ValueError: If the two distributions are over spheres of different
dimensions.
#### References
[1] Nicola de Cao, Wilker Aziz. The Power Spherical distribution.
https://arxiv.org/abs/2006.04437.
"""
with tf.name_scope(name or 'kl_power_uniform_spherical'):
msg = (
'Can not compute the KL divergence between a `PowerSpherical` and '
'`SphericalUniform` of different dimensions.')
deps = []
if a.event_shape[-1] is not None:
if a.event_shape[-1] != b.dimension:
raise ValueError(
(msg + 'Got {} vs. {}').format(a.event_shape[-1], b.dimension))
elif a.validate_args or b.validate_args:
deps += [assert_util.assert_equal(
a.event_shape_tensor()[-1], b.dimension, message=msg)]
with tf.control_dependencies(deps):
return b.entropy() - a.entropy()
@kullback_leibler.RegisterKL(PowerSpherical, von_mises_fisher.VonMisesFisher)
def _kl_power_spherical_vmf(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b).
Args:
a: instance of a PowerSpherical distribution object.
b: instance of a VonMisesFisher distribution object.
name: (optional) Name to use for created operations.
default is "kl_power_spherical_vmf".
Returns:
Batchwise KL(a || b)
Raises:
ValueError: If the two distributions are over spheres of different
dimensions.
#### References
[1] Nicola de Cao, Wilker Aziz. The Power Spherical distribution.
https://arxiv.org/abs/2006.04437.
"""
with tf.name_scope(name or 'kl_power_spherical_vmf'):
msg = (
'Can not compute the KL divergence between a `PowerSpherical` and '
'`VonMisesFisher` of different dimensions.')
deps = []
if a.event_shape[-1] is not None and b.event_shape[-1] is not None:
if a.event_shape[-1] != b.event_shape[-1]:
raise ValueError(
(msg + 'Got {} vs. {}').format(
a.event_shape[-1], b.event_shape[-1]))
elif a.validate_args or b.validate_args:
deps += [assert_util.assert_equal(
a.event_shape_tensor()[-1], b.event_shape_tensor()[-1], message=msg)]
with tf.control_dependencies(deps):
a_mean_direction = tf.convert_to_tensor(a.mean_direction)
a_concentration = tf.convert_to_tensor(a.concentration)
b_mean_direction = tf.convert_to_tensor(b.mean_direction)
b_concentration = tf.convert_to_tensor(b.concentration)
event_size = tf.cast(a._event_shape_tensor( # pylint:disable=protected-access
mean_direction=a_mean_direction)[-1], a.dtype)
kl = (-a._entropy(concentration=a_concentration, # pylint:disable=protected-access
mean_direction=a_mean_direction) +
b._log_normalization( # pylint:disable=protected-access
concentration=b_concentration) -
a_concentration * b_concentration * tf.reduce_sum(
a_mean_direction * b_mean_direction, axis=-1) / (
a_concentration + event_size - 1.))
return kl
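if __name__ == '__main__':
  # Usage sketch (guarded so importing the module stays side-effect free): it
  # relies only on names defined or imported above, and the numeric values are
  # purely illustrative. It samples from a PowerSpherical, evaluates log_prob,
  # and exercises the KL registration against the spherical uniform.
  tf.random.set_seed(0)
  dist = PowerSpherical(mean_direction=[0., 1., 0.], concentration=10.)
  draws = dist.sample(4, seed=1)
  print('log_prob of draws:', dist.log_prob(draws))
  uniform = spherical_uniform.SphericalUniform(dimension=3)
  print('KL(PowerSpherical || SphericalUniform):',
        kullback_leibler.kl_divergence(dist, uniform))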
| 40.802128
| 97
| 0.693852
|
947e14cc79896dc87bbcbaadccb9e1587c7da609
| 7,756
|
py
|
Python
|
p65/Ophis/Opcodes.py
|
JixunMoe/ContraNES1TranslationPatch
|
0cc514e8badd4ac872bff82d3f566fb97fe86685
|
[
"BSD-3-Clause"
] | 1
|
2020-07-30T08:57:33.000Z
|
2020-07-30T08:57:33.000Z
|
p65/Ophis/Opcodes.py
|
jixunmoe/ContraNES1TranslationPatch
|
0cc514e8badd4ac872bff82d3f566fb97fe86685
|
[
"BSD-3-Clause"
] | null | null | null |
p65/Ophis/Opcodes.py
|
jixunmoe/ContraNES1TranslationPatch
|
0cc514e8badd4ac872bff82d3f566fb97fe86685
|
[
"BSD-3-Clause"
] | null | null | null |
"""6502 and 6510 opcodes
Tables for the assembly of 6502 and 6510 instructions, mapping
opcodes and addressing modes to binary instructions. Includes
the undocumented 6510 ops, as described in the VICE manuals."""
# Copyright 2002 Michael C. Martin.
# You may use, modify, and distribute this file under the BSD
# license: See LICENSE.txt for details.
# Names of addressing modes
modes = ["Implied", # 0
"Immediate", # 1
"Zero Page", # 2
"Zero Page, X", # 3
"Zero Page, Y", # 4
"Absolute", # 5
"Absolute, X", # 6
"Absolute, Y", # 7
"(Indirect)", # 8
"(Indirect, X)", # 9
"(Indirect), Y", # 10
"Relative"] # 11
# Lengths of the argument
lengths = [0, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1]
# IMPL IMME ZP ZP-X ZP-Y ABS ABSX ABSY IND INDX INDY REL
opcodes = { "adc":(None, 0x69, 0x65, 0x75, None, 0x6D, 0x7D, 0x79, None, 0x61, 0x71, None),
"and":(None, 0x29, 0x25, 0x35, None, 0x2D, 0x3D, 0x39, None, 0x21, 0x31, None),
"asl":(0x0A, None, 0x06, 0x16, None, 0x0E, 0x1E, None, None, None, None, None),
"bcc":(None, None, None, None, None, None, None, None, None, None, None, 0x90),
"bcs":(None, None, None, None, None, None, None, None, None, None, None, 0xB0),
"beq":(None, None, None, None, None, None, None, None, None, None, None, 0xF0),
"bit":(None, None, 0x24, None, None, 0x2C, None, None, None, None, None, None),
"bmi":(None, None, None, None, None, None, None, None, None, None, None, 0x30),
"bne":(None, None, None, None, None, None, None, None, None, None, None, 0xD0),
"bpl":(None, None, None, None, None, None, None, None, None, None, None, 0x10),
"brk":(0x00, None, None, None, None, None, None, None, None, None, None, None),
"bvc":(None, None, None, None, None, None, None, None, None, None, None, 0x50),
"bvs":(None, None, None, None, None, None, None, None, None, None, None, 0x70),
"clc":(0x18, None, None, None, None, None, None, None, None, None, None, None),
"cld":(0xD8, None, None, None, None, None, None, None, None, None, None, None),
"cli":(0x58, None, None, None, None, None, None, None, None, None, None, None),
"clv":(0xB8, None, None, None, None, None, None, None, None, None, None, None),
"cmp":(None, 0xC9, 0xC5, 0xD5, None, 0xCD, 0xDD, 0xD9, None, 0xC1, 0xD1, None),
"cpx":(None, 0xE0, 0xE4, None, None, 0xEC, None, None, None, None, None, None),
"cpy":(None, 0xC0, 0xC4, None, None, 0xCC, None, None, None, None, None, None),
"dec":(None, None, 0xC6, 0xD6, None, 0xCE, 0xDE, None, None, None, None, None),
"dex":(0xCA, None, None, None, None, None, None, None, None, None, None, None),
"dey":(0x88, None, None, None, None, None, None, None, None, None, None, None),
"eor":(None, 0x49, 0x45, 0x55, None, 0x4D, 0x5D, 0x59, None, 0x41, 0x51, None),
"inc":(None, None, 0xE6, 0xF6, None, 0xEE, 0xFE, None, None, None, None, None),
"inx":(0xE8, None, None, None, None, None, None, None, None, None, None, None),
"iny":(0xC8, None, None, None, None, None, None, None, None, None, None, None),
"jmp":(None, None, None, None, None, 0x4C, None, None, 0x6C, None, None, None),
"jsr":(None, None, None, None, None, 0x20, None, None, None, None, None, None),
"lda":(None, 0xA9, 0xA5, 0xB5, None, 0xAD, 0xBD, 0xB9, None, 0xA1, 0xB1, None),
"ldx":(None, 0xA2, 0xA6, None, 0xB6, 0xAE, None, 0xBE, None, None, None, None),
"ldy":(None, 0xA0, 0xA4, 0xB4, None, 0xAC, 0xBC, None, None, None, None, None),
"lsr":(0x4A, None, 0x46, 0x56, None, 0x4E, 0x5E, None, None, None, None, None),
"nop":(0xEA, None, None, None, None, None, None, None, None, None, None, None),
"ora":(None, 0x09, 0x05, 0x15, None, 0x0D, 0x1D, 0x19, None, 0x01, 0x11, None),
"pha":(0x48, None, None, None, None, None, None, None, None, None, None, None),
"php":(0x08, None, None, None, None, None, None, None, None, None, None, None),
"pla":(0x68, None, None, None, None, None, None, None, None, None, None, None),
"plp":(0x28, None, None, None, None, None, None, None, None, None, None, None),
"rol":(0x2A, None, 0x26, 0x36, None, 0x2E, 0x3E, None, None, None, None, None),
"ror":(0x6A, None, 0x66, 0x76, None, 0x6E, 0x7E, None, None, None, None, None),
"rti":(0x40, None, None, None, None, None, None, None, None, None, None, None),
"rts":(0x60, None, None, None, None, None, None, None, None, None, None, None),
"sbc":(None, 0xE9, 0xE5, 0xF5, None, 0xED, 0xFD, 0xF9, None, 0xE1, 0xF1, None),
"sec":(0x38, None, None, None, None, None, None, None, None, None, None, None),
"sed":(0xF8, None, None, None, None, None, None, None, None, None, None, None),
"sei":(0x78, None, None, None, None, None, None, None, None, None, None, None),
"sta":(None, None, 0x85, 0x95, None, 0x8D, 0x9D, 0x99, None, 0x81, 0x91, None),
"stx":(None, None, 0x86, None, 0x96, 0x8E, None, None, None, None, None, None),
"sty":(None, None, 0x84, 0x94, None, 0x8C, None, None, None, None, None, None),
"tax":(0xAA, None, None, None, None, None, None, None, None, None, None, None),
"tay":(0xA8, None, None, None, None, None, None, None, None, None, None, None),
"tsx":(0xBA, None, None, None, None, None, None, None, None, None, None, None),
"txa":(0x8A, None, None, None, None, None, None, None, None, None, None, None),
"txs":(0x9A, None, None, None, None, None, None, None, None, None, None, None),
"tya":(0x98, None, None, None, None, None, None, None, None, None, None, None) }
undocops = { "anc":(None, 0x0B, None, None, None, None, None, None, None, None, None, None),
"ane":(None, 0x8B, None, None, None, None, None, None, None, None, None, None),
"arr":(None, 0x6B, None, None, None, None, None, None, None, None, None, None),
"asr":(None, 0x4B, None, None, None, None, None, None, None, None, None, None),
"dcp":(None, None, 0xC7, 0xD7, None, 0xCF, 0xDF, 0xDB, None, 0xC3, 0xD3, None),
"isb":(None, None, 0xE7, 0xF7, None, 0xEF, 0xFF, 0xFB, None, 0xE3, 0xF3, None),
"las":(None, None, None, None, None, None, None, 0xBB, None, None, None, None),
"lax":(None, None, 0xA7, None, 0xB7, 0xAF, None, 0xBF, None, 0xA3, 0xB3, None),
"lxa":(None, 0xAB, None, None, None, None, None, None, None, None, None, None),
"rla":(None, None, 0x27, 0x37, None, 0x2F, 0x3F, 0x3B, None, 0x23, 0x33, None),
"rra":(None, None, 0x67, 0x77, None, 0x6F, 0x7F, 0x7B, None, 0x63, 0x73, None),
"sax":(None, None, 0x87, None, 0x97, 0x8F, None, None, None, 0x83, None, None),
"sbx":(None, 0xCB, None, None, None, None, None, None, None, None, None, None),
"sha":(None, None, None, None, None, None, None, 0x9F, None, None, 0x93, None),
"shs":(None, None, None, None, None, None, None, 0x9B, None, None, None, None),
"shx":(None, None, None, None, None, None, None, 0x7E, None, None, None, None),
"slo":(None, None, 0x07, 0x17, None, 0x0F, 0x1F, 0x1B, None, 0x03, 0x13, None),
"sre":(None, None, 0x47, 0x57, None, 0x4F, 0x5F, 0x5B, None, 0x43, 0x53, None)}
| 73.866667
| 93
| 0.552862
|
17f874878d7bd7d1e93989f60f26a27d6d25ed48
| 9,754
|
py
|
Python
|
appengine/chromium_bugs/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chromium_bugs/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/chromium_bugs/main.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import re
import urllib
import urlparse
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import util
# pylint warning disabled until httpagentparser can be added to the wheelhouse.
# http://crbug.com/410984
import httpagentparser # pylint: disable=F0401
import settings
from third_party import ezt
WIZARD_TEMPLATE_PATH = 'templates/wizard.ezt'
WIZARD_HTML_TEMPLATE = ezt.Template(WIZARD_TEMPLATE_PATH)
legacy_template = """Chrome Version : %s
OS Version: %s
URLs (if applicable) :
Other browsers tested:
Add OK or FAIL after other browsers where you have tested this issue:
Safari 5:
Firefox 4.x:
IE 7/8/9:
What steps will reproduce the problem?
1.
2.
3.
What is the expected result?
What happens instead%s?
Please provide any additional information below. Attach a screenshot if
possible.
%s
"""
DEFAULT_BUG_TEMPLATE_NAME = 'Defect%20report%20from%20user'
MAC_BUG_TEMPLATE_NAME = 'Defect%20on%20Mac%20OS'
LINUX_BUG_TEMPLATE_NAME = 'Defect%20on%20Linux'
CHROME_OS_BUG_TEMPLATE_NAME = 'Defect%20on%20Chrome%20OS'
WINDOWS_BUG_TEMPLATE_NAME = 'Defect%20on%20Windows'
MISSING_TOKEN_HTML = (
'<html><body>'
'<h1>Not signed in</h1>'
'<p>Please go back and sign in to bugs.chromium.org before '
'using this wizard.</p>'
''
'</body></html>'
)
# The continue_url domain must match with one of these.
ALLOWED_CONTINUE_DOMAINS = [
re.compile('^localhost:8080$'),
re.compile('^code.google.com$'),
re.compile('^bugs(-staging)?.chromium.org$'),
re.compile('^([-a-z0-9.]+-dot-)?monorail-(prod|staging).appspot.com$'),
]
INVALID_CONTINUE_HTML = (
'<html><body>'
'<h1>Invalid continue parameter</h1>'
'<p>This wizard can only be used with '
'bugs.chromium.org.</p>'
''
'</body></html>'
)
class MainHandler(webapp2.RequestHandler):
def get(self):
uas = self.request.headers['User-Agent']
role = self.request.get('role')
continue_url = self.request.get('continue')
token = self.request.get('token')
self.response.headers.add(
'Strict-Transport-Security', 'max-age=31536000; includeSubDomains')
if continue_url and not token:
logging.info('Missing token')
self.response.out.write(MISSING_TOKEN_HTML)
return
if not continue_url:
continue_url = 'https://bugs.chromium.org/p/chromium/issues/entry.do'
# Special case, chromium-os issues are now being tracked in /p/chromium.
if '//code.google.com/p/chromium-os/issues/entry.do' in continue_url:
continue_url = 'https://bugs.chromium.org/p/chromium/issues/entry.do'
parsed = urlparse.urlparse(continue_url)
continue_is_allowed = any(
regex.match(parsed.netloc) for regex in ALLOWED_CONTINUE_DOMAINS)
if not continue_is_allowed:
logging.info('Bad continue param: %r', continue_url)
self.response.out.write(INVALID_CONTINUE_HTML)
return
if '?' in continue_url:
# Codesite includes contextual parameters for search terms, etc.
validate_url = continue_url.split('?')[0]
else:
validate_url = continue_url
if not validate_url.endswith('.do'):
logging.info('validate_url does not end in .do: %r', validate_url)
self.response.out.write(
'Malformed "continue" query string parameter: %r' %
urllib.quote(validate_url))
return
issue_entry_page_url = validate_url[:-3]
user = users.get_current_user()
if role or (user and re.match(
r".*?@chromium\.org\Z", user.email(), re.DOTALL | re.IGNORECASE)):
self.redirect(issue_entry_page_url.encode('utf8'))
return
ua = httpagentparser.detect(uas)
name = ''
os_version = ''
browser = None
browser_version = None
chrome_version = "<Copy from: 'about:version'>"
chrome_ua = ""
template_name = DEFAULT_BUG_TEMPLATE_NAME
# Mac
# {'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'},
# 'os': {'name': 'Macintosh'},
# 'browser': {'version': '11.0.696.16', 'name': 'Chrome'}}
# Win
# {'os': {'version': 'NT 6.1', 'name': 'Windows'},
# 'browser': {'version': '11.0.696.16', 'name': 'Chrome'}}
if ua:
if ua.has_key('os') and ua['os'].has_key('name'):
name = ua['os']['name']
if name == 'Windows':
if 'version' in ua['os']:
os_version = ua['os']['version']
else:
os_version = 'Unknown'
match = re.search(
r"(\d+\.\d+)", os_version, re.DOTALL | re.IGNORECASE)
if match:
version = match.group(1)
else:
version = ''
if version == '6.2':
os_version += ' (Windows 8)'
elif version == '6.1':
os_version += ' (Windows 7, Windows Server 2008 R2)'
elif version == '6.0':
os_version += ' (Windows Vista, Windows Server 2008)'
elif version == '5.2':
os_version += ' (Windows Server 2003, Windows XP 64)'
elif version == '5.1':
os_version += ' (Windows XP)'
elif version == '5.0':
os_version += ' (Windows 2000)'
template_name = WINDOWS_BUG_TEMPLATE_NAME
elif name == 'Macintosh':
template_name = MAC_BUG_TEMPLATE_NAME
if ua.has_key('flavor') and ua['flavor'].has_key('version'):
os_version = ua['flavor']['version']
elif name == 'Linux':
template_name = LINUX_BUG_TEMPLATE_NAME
# We might be able to do flavors
elif name == 'ChromeOS':
template_name = CHROME_OS_BUG_TEMPLATE_NAME
os_version = ua['os']['version']
if ua.has_key('browser'):
browser = ua['browser']['name']
browser_version = ua['browser']['version']
if browser == "Chrome":
chrome_version = browser_version
chrome_ua = '\nUserAgentString: %s\n' % uas
if not token or self.ShouldDoLegacyBehavior(browser, browser_version):
# Allow us to measure number of users who came through new.crbug.com
# by putting in a phrase that we can query for: "instead of that".
# Also, when bugs.chromium.org is in a scheduled read-only period, direct
# users straight to the classic issue entry page.
detectable_phrase = '' if token else ' of that'
comment = legacy_template % (
chrome_version, os_version, detectable_phrase, chrome_ua)
url = (issue_entry_page_url + '?template=' + template_name + '&' +
urllib.urlencode({'comment': comment}))
self.redirect(url.encode('utf8'))
return
channel_guess_os_name = {
'macintosh': 'mac',
'windows': 'win',
'linux': 'linux',
'ios': 'ios',
'chromeframe': 'cf',
'chromeos': 'cros',
# Android cannot be guessed.
}.get(name.lower(), name.lower())
app_version = os.environ.get('CURRENT_VERSION_ID')
page_data = {
'app_version': app_version,
'chrome_version': chrome_version,
'channel_guess_os_name': channel_guess_os_name,
'os_name': name,
'os_version': os_version,
'chrome_ua': chrome_ua,
'continue_url': continue_url,
'token': token,
}
# TODO(jrobbins): Use WIZARD_HTML_TEMPLATE for speed.
ezt.Template(WIZARD_TEMPLATE_PATH, base_format=ezt.FORMAT_HTML).generate(
self.response.out, page_data)
# pylint: disable=R0201
def ShouldDoLegacyBehavior(self, browser, version):
"""Return True if this request should produce the old templat+UA behavior.
This feature is intended to allow A/B testing so that we can measure how
the new issue wizard affects user behavior, report quantity, and quality.
"""
# We have a lot of old data that we can use for comparison, so let's
# just forget about experiments for now. If we need to do one, we
# could deploy a different version of the app for a period of time.
# token = self.request.get('token')
# if hash(token) % 100 < 10: # 10% experiment
# logging.info('routing user to non-wizard')
# return True
# Old versions of IE do not support pushState, send them through
# the legacy issue entry page.
try:
version = version or '0'
version_number = int(version.split('.')[0])
except ValueError:
version_number = 0
if browser == 'Microsoft Internet Explorer' and version_number < 10:
return True
# If the site is read-only, let the user see that error message.
# If the site is read-write during a scheduled read-only window,
# users will still be able to enter issue via the classic issue form.
for start, duration in settings.READ_ONLY_WINDOWS:
now = datetime.datetime.utcnow()
if start < now < start + duration:
logging.info('Site is scheduled to be in read-only mode %r < %r < %r',
start, now, start + duration)
return True
return False
application = webapp2.WSGIApplication(
[('/', MainHandler),
('/wizard.html', MainHandler),
('/wizard.do', MainHandler)],
debug=True)
| 33.064407
| 79
| 0.644761
|
04abe2e4bb08fede9cd522c7eef469cd1914bbed
| 1,423
|
py
|
Python
|
geonode_mapstore_client/context_processors.py
|
majid-saeed/geonode-mapstore-client
|
2580014a52e41089d29c2211ba89c50ed936598a
|
[
"BSD-2-Clause-FreeBSD"
] | 8
|
2020-12-07T13:55:49.000Z
|
2022-01-27T15:53:58.000Z
|
geonode_mapstore_client/context_processors.py
|
majid-saeed/geonode-mapstore-client
|
2580014a52e41089d29c2211ba89c50ed936598a
|
[
"BSD-2-Clause-FreeBSD"
] | 256
|
2019-07-18T12:17:04.000Z
|
2022-03-31T07:52:44.000Z
|
geonode_mapstore_client/context_processors.py
|
majid-saeed/geonode-mapstore-client
|
2580014a52e41089d29c2211ba89c50ed936598a
|
[
"BSD-2-Clause-FreeBSD"
] | 50
|
2019-08-23T09:17:18.000Z
|
2022-03-31T12:19:37.000Z
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright 2018, GeoSolutions Sas.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
#########################################################################
from django.conf import settings
def resource_urls(request):
"""Global values to pass to templates"""
defaults = dict(
GEOAPPS = ['GeoStory', 'GeoDashboard']
)
defaults['GEONODE_SETTINGS'] = {
'MAP_BASELAYERS': getattr(settings, "MAPSTORE_BASELAYERS", []),
'MAP_BASELAYERS_SOURCES': getattr(settings, "MAPSTORE_BASELAYERS_SOURCES", {}),
'CATALOGUE_SERVICES': getattr(settings, "MAPSTORE_CATALOGUE_SERVICES", {}),
'CATALOGUE_SELECTED_SERVICE': getattr(settings, "MAPSTORE_CATALOGUE_SELECTED_SERVICE", None),
'DEFAULT_MAP_CENTER_X': getattr(settings, "DEFAULT_MAP_CENTER_X", 0),
'DEFAULT_MAP_CENTER_Y': getattr(settings, "DEFAULT_MAP_CENTER_Y", 0),
'DEFAULT_MAP_CRS': getattr(settings, "DEFAULT_MAP_CRS", 'EPSG:3857'),
'DEFAULT_MAP_ZOOM': getattr(settings, "DEFAULT_MAP_ZOOM", 0),
'DEFAULT_TILE_SIZE': getattr(settings, "DEFAULT_TILE_SIZE", 512),
'DEFAULT_LAYER_FORMAT': getattr(settings, "DEFAULT_LAYER_FORMAT", 'image/png')
}
return defaults
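# Example wiring (illustrative sketch, not part of this module): a context
# processor is enabled by adding its dotted path to the template engine
# options in the Django project's settings, e.g.
#
#   TEMPLATES = [{
#       "BACKEND": "django.template.backends.django.DjangoTemplates",
#       "OPTIONS": {
#           "context_processors": [
#               # ... Django defaults ...
#               "geonode_mapstore_client.context_processors.resource_urls",
#           ],
#       },
#   }]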
| 44.46875
| 101
| 0.62825
|
8394920fc14279951b1e0c131de0f172ad84b0bb
| 1,778
|
py
|
Python
|
tests/rules/test_git_two_dashes.py
|
samzhang111/oops
|
5823623f94f7c4cdeccea4938c1a0efd4280184e
|
[
"MIT"
] | null | null | null |
tests/rules/test_git_two_dashes.py
|
samzhang111/oops
|
5823623f94f7c4cdeccea4938c1a0efd4280184e
|
[
"MIT"
] | null | null | null |
tests/rules/test_git_two_dashes.py
|
samzhang111/oops
|
5823623f94f7c4cdeccea4938c1a0efd4280184e
|
[
"MIT"
] | null | null | null |
import pytest
from theoops.rules.git_two_dashes import match, get_new_command
from tests.utils import Command
def stderr(meant):
    # Plain helper, not a pytest fixture: it is called directly below while
    # building the parametrize lists, which pytest does not allow for fixtures.
    return 'error: did you mean `%s` (with two dashes ?)' % meant
@pytest.mark.parametrize('command', [
Command(script='git add -patch', stderr=stderr('--patch')),
Command(script='git checkout -patch', stderr=stderr('--patch')),
Command(script='git commit -amend', stderr=stderr('--amend')),
Command(script='git push -tags', stderr=stderr('--tags')),
Command(script='git rebase -continue', stderr=stderr('--continue'))])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command(script='git add --patch'),
Command(script='git checkout --patch'),
Command(script='git commit --amend'),
Command(script='git push --tags'),
Command(script='git rebase --continue')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, output', [
(Command(script='git add -patch', stderr=stderr('--patch')),
'git add --patch'),
(Command(script='git checkout -patch', stderr=stderr('--patch')),
'git checkout --patch'),
(Command(script='git checkout -patch', stderr=stderr('--patch')),
'git checkout --patch'),
(Command(script='git init -bare', stderr=stderr('--bare')),
'git init --bare'),
(Command(script='git commit -amend', stderr=stderr('--amend')),
'git commit --amend'),
(Command(script='git push -tags', stderr=stderr('--tags')),
'git push --tags'),
(Command(script='git rebase -continue', stderr=stderr('--continue')),
'git rebase --continue')])
def test_get_new_command(command, output):
assert get_new_command(command) == output
| 37.041667
| 73
| 0.654106
|
cd6c8f59e710458a675f8d773da12d20a35c7723
| 1,233
|
py
|
Python
|
cupy/cupy_raw_kernel_addition/cupy_raw_kernel_addition.py
|
gschramm/python_tutorials
|
14369b15511fa1affdab78335d1c06c4ff2fb90b
|
[
"Apache-2.0"
] | null | null | null |
cupy/cupy_raw_kernel_addition/cupy_raw_kernel_addition.py
|
gschramm/python_tutorials
|
14369b15511fa1affdab78335d1c06c4ff2fb90b
|
[
"Apache-2.0"
] | null | null | null |
cupy/cupy_raw_kernel_addition/cupy_raw_kernel_addition.py
|
gschramm/python_tutorials
|
14369b15511fa1affdab78335d1c06c4ff2fb90b
|
[
"Apache-2.0"
] | null | null | null |
# minimal example showing how to use raw (external) CUDA kernels with cupy
#
# Aim: understand how to load and execute a raw kernel, using the addition of two arrays
import cupy as cp
import numpy as np
import math
from functools import reduce
# load a kernel defined in a external file
with open('add_kernel.cu','r') as f:
add_kernel = cp.RawKernel(f.read(), 'my_add')
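# The kernel source lives in the external add_kernel.cu file loaded above and
# is not reproduced here; a minimal elementwise-addition kernel consistent with
# the launch below might look like this (illustrative sketch, not the verified
# original; the integer type of `n` must match how the scalar is passed in):
#
#   extern "C" __global__
#   void my_add(const float* x, const float* y, float* z, long long n) {
#       long long i = (long long)blockDim.x * blockIdx.x + threadIdx.x;
#       if (i < n) {
#           z[i] = x[i] + y[i];
#       }
#   }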
#-------------------------------------------------------------
cp.random.seed(1)
# shape of random arrays
shape = (55,55,55)
# number of elements in the arrays
n = reduce(lambda x,y: x*y, shape)
# define the number of threads per block and calculate the number of blocks per grid
threads_per_block = 64
blocks_per_grid = math.ceil(n/threads_per_block)
# define two random device arrays
xd = cp.random.rand(*shape).astype(cp.float32)
yd = cp.random.rand(*shape).astype(cp.float32)
# device array for output
zd = cp.zeros(shape, dtype=cp.float32)
# execute the kernel
add_kernel((blocks_per_grid,), (threads_per_block,), (xd, yd, zd, n)) # grid, block and arguments
# print first 5 elements
print(xd.ravel()[:5])
print(yd.ravel()[:5])
print(zd.ravel()[:5])
# check against numpy addition
assert np.allclose(cp.asnumpy(zd), cp.asnumpy(xd) + cp.asnumpy(yd))
| 28.674419
| 98
| 0.691809
|
17741225ab8534954046466cdd86cd72387b723c
| 2,934
|
py
|
Python
|
haystack/nodes/file_classifier/file_type.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-06T02:13:15.000Z
|
2022-03-06T02:13:15.000Z
|
haystack/nodes/file_classifier/file_type.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | null | null | null |
haystack/nodes/file_classifier/file_type.py
|
ArzelaAscoIi/haystack
|
be8f50c9e3de4e264b3f345f5f4b9c9ec518ed08
|
[
"Apache-2.0"
] | 1
|
2022-03-23T18:17:02.000Z
|
2022-03-23T18:17:02.000Z
|
from typing import List, Union
from pathlib import Path
from haystack.nodes.base import BaseComponent
DEFAULT_TYPES = ["txt", "pdf", "md", "docx", "html"]
class FileTypeClassifier(BaseComponent):
"""
Route files in an Indexing Pipeline to corresponding file converters.
"""
outgoing_edges = 10
def __init__(self, supported_types: List[str] = DEFAULT_TYPES):
"""
Node that sends out files on a different output edge depending on their extension.
:param supported_types: the file types that this node can distinguish.
            Note that this node is limited to a maximum of 10 outgoing edges, each of
            which corresponds to one file extension. By default the extensions are
            `txt`, `pdf`, `md`, `docx`, and `html`. Lists containing more than 10
            elements are not allowed, and lists with duplicate elements are rejected.
"""
if len(supported_types) > 10:
raise ValueError("supported_types can't have more than 10 values.")
if len(set(supported_types)) != len(supported_types):
raise ValueError("supported_types can't contain duplicate values.")
self.set_config(supported_types=supported_types)
self.supported_types = supported_types
def _get_extension(self, file_paths: List[Path]) -> str:
"""
Return the extension found in the given list of files.
Also makes sure that all files have the same extension.
If this is not true, it throws an exception.
:param file_paths: the paths to extract the extension from
        :return: the shared file extension as a string, without the leading dot
"""
extension = file_paths[0].suffix
for path in file_paths:
if path.suffix != extension:
raise ValueError(f"Multiple file types are not allowed at once.")
return extension.lstrip(".")
def run(self, file_paths: Union[Path, List[Path], str, List[str], List[Union[Path, str]]]): # type: ignore
"""
Sends out files on a different output edge depending on their extension.
:param file_paths: paths to route on different edges.
"""
if not isinstance(file_paths, list):
file_paths = [file_paths]
paths = [Path(path) for path in file_paths]
output = {"file_paths": paths}
extension = self._get_extension(paths)
try:
index = self.supported_types.index(extension) + 1
except ValueError:
raise ValueError(
f"Files of type '{extension}' are not supported. "
f"The supported types are: {self.supported_types}. "
"Consider using the 'supported_types' parameter to "
"change the types accepted by this node."
)
return output, f"output_{index}"
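if __name__ == "__main__":
    # Usage sketch (the file name is hypothetical): with the default supported
    # types, a ".pdf" path should be routed to the second output edge.
    classifier = FileTypeClassifier()
    output, edge = classifier.run(file_paths=["sample.pdf"])
    print(edge, output["file_paths"])  # e.g. output_2 [PosixPath('sample.pdf')]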
| 38.605263
| 111
| 0.639059
|
15b50e7f45fe06d206065cb4ea2bf93a798c04f1
| 3,470
|
py
|
Python
|
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_seesaw/keypad.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 47
|
2021-02-15T23:02:36.000Z
|
2022-03-04T21:30:03.000Z
|
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_seesaw/keypad.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 7
|
2021-02-19T20:00:08.000Z
|
2022-01-14T10:51:12.000Z
|
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_seesaw/keypad.py
|
jacoblb64/pico_rgb_keypad_hid
|
3251ca6a98ef86d9f98c54f639c4d61810601a0b
|
[
"MIT"
] | 14
|
2021-02-20T17:40:56.000Z
|
2022-01-01T19:53:38.000Z
|
# SPDX-FileCopyrightText: 2018 Dean Miller for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# pylint: disable=missing-docstring,invalid-name,too-many-public-methods
"""
`adafruit_seesaw.keypad`
====================================================
"""
try:
from micropython import const
except ImportError:
def const(x):
return x
from adafruit_seesaw.seesaw import Seesaw
__version__ = "1.7.1"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_seesaw.git"
_KEYPAD_BASE = const(0x10)
_KEYPAD_STATUS = const(0x00)
_KEYPAD_EVENT = const(0x01)
_KEYPAD_INTENSET = const(0x02)
_KEYPAD_INTENCLR = const(0x03)
_KEYPAD_COUNT = const(0x04)
_KEYPAD_FIFO = const(0x10)
# pylint: disable=too-few-public-methods
class KeyEvent:
"""Holds information about a key event in its properties
:param int num: The number of the key
    :param int edge: One of the EDGE properties of `adafruit_seesaw.keypad.Keypad`
"""
def __init__(self, num, edge):
self.number = int(num)
self.edge = int(edge)
# pylint: enable=too-few-public-methods
class Keypad(Seesaw):
"""On compatible SeeSaw devices, reads from a keypad.
:param ~busio.I2C i2c_bus: Bus the SeeSaw is connected to
:param int addr: I2C address of the SeeSaw device
:param ~digitalio.DigitalInOut drdy: Pin connected to SeeSaw's 'ready' output"""
#: Indicates that the key is currently pressed
EDGE_HIGH = 0
#: Indicates that the key is currently released
EDGE_LOW = 1
#: Indicates that the key was recently pressed
EDGE_FALLING = 2
#: Indicates that the key was recently released
EDGE_RISING = 3
def __init__(self, i2c_bus, addr=0x49, drdy=None):
super().__init__(i2c_bus, addr, drdy)
self._interrupt_enabled = False
@property
def interrupt_enabled(self):
"""Retrieve or set the interrupt enable flag"""
return self._interrupt_enabled
@interrupt_enabled.setter
def interrupt_enabled(self, value):
if value not in (True, False):
raise ValueError("interrupt_enabled must be True or False")
self._interrupt_enabled = value
if value:
self.write8(_KEYPAD_BASE, _KEYPAD_INTENSET, 1)
else:
self.write8(_KEYPAD_BASE, _KEYPAD_INTENCLR, 1)
@property
def count(self):
"""Retrieve or set the number of keys"""
return self.read8(_KEYPAD_BASE, _KEYPAD_COUNT)
# pylint: disable=unused-argument, no-self-use
@count.setter
def count(self, value):
raise AttributeError("count is read only")
# pylint: enable=unused-argument, no-self-use
def set_event(self, key, edge, enable):
"""Control which kinds of events are set
:param int key: The key number
:param int edge: The type of event
:param bool enable: True to enable the event, False to disable it"""
if enable not in (True, False):
raise ValueError("event enable must be True or False")
if edge > 3 or edge < 0:
raise ValueError("invalid edge")
cmd = bytearray(2)
cmd[0] = key
cmd[1] = (1 << (edge + 1)) | enable
self.write(_KEYPAD_BASE, _KEYPAD_EVENT, cmd)
def read_keypad(self, num):
"""Read data from the keypad
:param int num: The number of bytes to read"""
ret = bytearray(num)
self.read(_KEYPAD_BASE, _KEYPAD_FIFO, ret)
return ret
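# Usage sketch (illustrative only): on a board with a seesaw keypad attached,
# pending events are drained from the FIFO and decoded into KeyEvent objects.
# The I2C wiring, address, and the (number, edge) bit packing shown here are
# assumptions based on how downstream drivers consume read_keypad().
#
#   import board
#   import busio
#   keypad = Keypad(busio.I2C(board.SCL, board.SDA), addr=0x49)
#   keypad.set_event(0, Keypad.EDGE_RISING, True)
#   pending = keypad.count
#   if pending:
#       for raw in keypad.read_keypad(pending):
#           event = KeyEvent(raw >> 2, raw & 0x03)
#           print(event.number, event.edge)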
| 28.211382
| 84
| 0.655908
|
80f010c522276bac34639e47726d5a7ef923927f
| 4,309
|
py
|
Python
|
env/lib/python3.8/site-packages/hdfs/ext/kerberos.py
|
paulowe/apache-beam-redocumentation
|
d1b0f345d8e46f9893f56c2bb890edc07be09f2a
|
[
"MIT"
] | null | null | null |
env/lib/python3.8/site-packages/hdfs/ext/kerberos.py
|
paulowe/apache-beam-redocumentation
|
d1b0f345d8e46f9893f56c2bb890edc07be09f2a
|
[
"MIT"
] | null | null | null |
env/lib/python3.8/site-packages/hdfs/ext/kerberos.py
|
paulowe/apache-beam-redocumentation
|
d1b0f345d8e46f9893f56c2bb890edc07be09f2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
"""Support for clusters using Kerberos_ authentication.
This extension adds a new :class:`hdfs.client.Client` subclass,
:class:`KerberosClient`, which handles authentication appropriately with
Kerberized clusters:
.. code-block:: python
from hdfs.ext.kerberos import KerberosClient
client = KerberosClient('http://host:port')
To expose this class to the command line interface (so that it can be used by
aliases), we add the following line inside the `global` section of
`~/.hdfscli.cfg` (or wherever our configuration file is located):
.. code-block:: cfg
autoload.modules = hdfs.ext.kerberos
Here is what our earlier configuration would look like if we updated it to
support a Kerberized production grid:
.. code-block:: cfg
[global]
default.alias = dev
autoload.modules = hdfs.ext.kerberos
[dev.alias]
url = http://dev.namenode:port
[prod.alias]
url = http://prod.namenode:port
client = KerberosClient
.. _Kerberos: http://web.mit.edu/kerberos/
"""
from ..client import Client
from ..util import HdfsError
from six import string_types
from threading import Lock, Semaphore
from time import sleep, time
import requests as rq
import requests_kerberos # For mutual authentication globals.
class _HdfsHTTPKerberosAuth(requests_kerberos.HTTPKerberosAuth):
"""Kerberos authenticator which throttles authentication requests.
Without it, authentication will otherwise fail if too many concurrent
requests are being made. To avoid replay errors, a timeout of 1 ms is also
enforced between requests.
"""
_delay = 0.001 # Seconds.
def __init__(self, max_concurrency, **kwargs):
self._lock = Lock()
self._sem = Semaphore(max_concurrency)
self._timestamp = time() - self._delay
super(_HdfsHTTPKerberosAuth, self).__init__(**kwargs)
def __call__(self, req):
with self._sem:
with self._lock:
delay = self._timestamp + self._delay - time()
if delay > 0:
sleep(delay) # Avoid replay errors.
self._timestamp = time()
return super(_HdfsHTTPKerberosAuth, self).__call__(req)
class KerberosClient(Client):
r"""HDFS web client using Kerberos authentication.
:param url: Hostname or IP address of HDFS namenode, prefixed with protocol,
followed by WebHDFS port on namenode.
:param mutual_auth: Whether to enforce mutual authentication or not (possible
values: `'REQUIRED'`, `'OPTIONAL'`, `'DISABLED'`).
:param max_concurrency: Maximum number of allowed concurrent authentication
requests. This is required since requests exceeding the threshold allowed
by the server will be unable to authenticate.
:param proxy: User to proxy as.
:param root: Root path, this will be prefixed to all HDFS paths passed to the
client. If the root is relative, the path will be assumed relative to the
user's home directory.
:param timeout: Connection timeouts, forwarded to the request handler. How
long to wait for the server to send data before giving up, as a float, or a
`(connect_timeout, read_timeout)` tuple. If the timeout is reached, an
appropriate exception will be raised. See the requests_ documentation for
details.
:param session: `requests.Session` instance, used to emit all requests.
:param \*\*kwargs: Additional arguments passed to the underlying
:class:`~requests_kerberos.HTTPKerberosAuth` class.
To avoid replay errors, a timeout of 1 ms is enforced between requests. If a
session argument is passed in, it will be modified in-place to support
authentication.
"""
def __init__(self, url, mutual_auth='OPTIONAL', max_concurrency=1, root=None,
proxy=None, timeout=None, session=None, **kwargs):
# We allow passing in a string as mutual authentication value.
if isinstance(mutual_auth, string_types):
try:
mutual_auth = getattr(requests_kerberos, mutual_auth)
except AttributeError:
raise HdfsError('Invalid mutual authentication type: %r', mutual_auth)
kwargs['mutual_authentication'] = mutual_auth
if not session:
session = rq.Session()
session.auth = _HdfsHTTPKerberosAuth(int(max_concurrency), **kwargs)
super(KerberosClient, self).__init__(
url, root=root, proxy=proxy, timeout=timeout, session=session
)
| 34.472
| 79
| 0.736134
|
513832377978a1091e3bdf1fbeb60c45fe940b88
| 2,130
|
py
|
Python
|
emo_clf2.py
|
kayzhou/Guba_emotion
|
286f1824500c77d8b90c3dc1bb0e120d732a546d
|
[
"MIT"
] | 6
|
2018-09-04T12:42:22.000Z
|
2020-12-12T12:12:48.000Z
|
emo_clf2.py
|
kayzhou/Guba_emotion
|
286f1824500c77d8b90c3dc1bb0e120d732a546d
|
[
"MIT"
] | 1
|
2018-11-14T04:03:44.000Z
|
2018-11-14T12:01:53.000Z
|
emo_clf2.py
|
kayzhou/Guba_emotion
|
286f1824500c77d8b90c3dc1bb0e120d732a546d
|
[
"MIT"
] | null | null | null |
import json
import os
from collections import Counter
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import SVC
from thulac import thulac
from tqdm import tqdm_notebook as tqdm
from sklearn.externals import joblib
thu = thulac(seg_only=True)
def load_stopword():
"""
    Load the set of stop words.
"""
return set(json.load(open('data/stopword-zh.json')))
def load_word_vec():
"""
    Load the ACL2018 word vectors.
"""
word_vec = {}
    print('Loading word vectors ...')
for i, line in enumerate(open('data/sgns.merge.word')):
# if i <= 100:
# continue
if i > 10000:
break
words = line.strip().split(' ')
word = words[0]
vec = np.array([float(num) for num in words[1:]])
word_vec[word] = vec
    print('Finished loading word vectors!')
return word_vec
def load_train_data(in_name):
"""
    Load the training data.
"""
X = []
y = []
for line in open(in_name):
label, vec = line.strip().split('\t')
x = np.array([float(v) for v in vec.split(',')])
y.append(label)
X.append(x)
return X, y
def train():
# X, y = load_train_data('train_data_one_hot-20180710.txt')
X, y = load_train_data('train_data_ACL-20180710.txt')
    # Split the dataset into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=41)
    # Initialize the classifier
    clf = RandomForestClassifier(max_depth=20, random_state=3)
    # clf = BernoulliNB()
    # clf = SVC(C=0.5)  # SVM is relatively slow
    # Run training
    clf.fit(X_train, y_train)
    # Evaluate the model
print(cross_val_score(clf, X, y, cv=10).mean())
y_pred = []
for i in range(len(X_test)):
y = clf.predict(X_test[i].reshape(1, -1))
# print(y[0])
y_pred.append(y[0])
print(classification_report(y_test, y_pred))
    # Save the model (refit on the full dataset; y must stay one-dimensional for fit)
    clf = RandomForestClassifier(max_depth=20, random_state=3)
    clf.fit(X, y)
    joblib.dump(clf, "emo-rf-v1.model")
train()
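# To reuse the persisted classifier later (illustrative sketch; the feature
# vector x must be built the same way as the training vectors above):
#
#   clf = joblib.load("emo-rf-v1.model")
#   label = clf.predict(x.reshape(1, -1))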
| 23.406593
| 93
| 0.622535
|
f8dbd050c9b9256b6b4cb4ee13887c28ab95d300
| 2,000
|
py
|
Python
|
src/lcmap/client/scripts/cl_tool/model.py
|
lcmap/client-py
|
fc356d9b2917f8e2d0e73048c9bf86982caa6676
|
[
"NASA-1.3"
] | null | null | null |
src/lcmap/client/scripts/cl_tool/model.py
|
lcmap/client-py
|
fc356d9b2917f8e2d0e73048c9bf86982caa6676
|
[
"NASA-1.3"
] | null | null | null |
src/lcmap/client/scripts/cl_tool/model.py
|
lcmap/client-py
|
fc356d9b2917f8e2d0e73048c9bf86982caa6676
|
[
"NASA-1.3"
] | null | null | null |
import io
import logging
import subprocess
import sys
import click
from lcmap.client.scripts.cl_tool import query
from lcmap.client.scripts.cl_tool.command import lcmap
log = logging.getLogger(__name__)
@lcmap.group()
@click.pass_obj
def model(config):
"Execute science models in the LCMAP Science Execution Environment."
@model.command()
@click.pass_obj
# Rod query options:
@click.option('--spectra', '-s', multiple=True, type=query.spectra_choices)
@click.option('--x', '-x', type=int)
@click.option('--y', '-y', type=int)
@click.option('--t1')
@click.option('--t2')
@click.option('--mask/--no-mask', is_flag=True, default=True)
@click.option('--shape/--no-shape', is_flag=True, default=True)
@click.option('--unscale/--scale', is_flag=True, default=True)
@click.option('--format', default="plain-text", type=query.format_choices)
# CCDC options:
@click.option('--row', type=int)
@click.option('--col', type=int)
@click.option('--out-dir', default="stdout")
@click.option('--scene-list', default="stdin")
@click.option('--verbose', is_flag=True, default=False)
# Model cli options
@click.option('--local', is_flag=True, default=False)
@click.option('--stdout', is_flag=True, default=True)
def ccdc(config, spectra, x, y, t1, t2, mask, shape, unscale, format,
row, col, out_dir, scene_list, verbose, local, stdout):
if local is False:
print("Renmote execution of models not yet supported.")
sys.exit(1)
if verbose:
verbose = "--verbose"
query_results = query.rod_query(
spectra, x, y, t1, t2, mask, shape, unscale, format)
    # Build the argument list explicitly: numeric options must be converted to
    # strings, the long options need their "=" separator, and --verbose is only
    # appended when requested.
    args = ["ccdc",
            "--row=" + str(row),
            "--col=" + str(col),
            "--outDir=" + out_dir,
            "--sceneList=" + scene_list]
    if verbose:
        args.append(verbose)
    # Popen cannot read from an in-memory StringIO, so feed the query results
    # through a real pipe via communicate().
    p = subprocess.Popen(
        args,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        universal_newlines=True)
    ccdc_results = p.communicate(input=query_results)[0]
if stdout:
print(ccdc_results)
else:
return ccdc_results
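# Example invocation (illustrative; the console-script name and the option
# values are assumptions, not taken from this repository's packaging):
#
#   lcmap model ccdc --local --x=-2062080 --y=2952960 --t1=2013-01-01 \
#       --t2=2017-01-01 --spectra=red --spectra=nir --row=100 --col=200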
| 29.411765
| 75
| 0.6575
|
6850bd215f27202b492ba5c6b7965d6debca8ee7
| 1,294
|
py
|
Python
|
30-substring-with-concatenation-of-all-words/s.py
|
typd/leetcode-solutions
|
96a7824be1e3339f679d20abfb3cea3aaf08cd46
|
[
"MIT"
] | null | null | null |
30-substring-with-concatenation-of-all-words/s.py
|
typd/leetcode-solutions
|
96a7824be1e3339f679d20abfb3cea3aaf08cd46
|
[
"MIT"
] | null | null | null |
30-substring-with-concatenation-of-all-words/s.py
|
typd/leetcode-solutions
|
96a7824be1e3339f679d20abfb3cea3aaf08cd46
|
[
"MIT"
] | null | null | null |
class Solution(object):
    def findSubstring(self,s,words):
        # Return the starting indices of every substring of s that is a
        # concatenation of all the words exactly once (all words share the
        # same length, so each window can be scanned word by word).
        r = []
        wc = len(words)
        if wc == 0:
            return r
        wl = len(words[0])
        sl = len(s)
        # Count how many times each word must appear in a valid window.
        m = {}
        for w in words:
            if not w in m:
                m[w] = 1
            else:
                m[w] = m[w]+1
        i = 0
        # print(m)
        # Slide a window of wc * wl characters across s.
        while i + wc * wl <= sl:
            #print("checking", s[i:])
            # Consume the window word by word, decrementing the remaining counts.
            count_down = m.copy()
            for j in range(wc):
                w = s[i+j*wl:i+j*wl+wl]
                if not w in count_down:
                    break
                else:
                    count_down[w] = count_down[w] - 1
                #print("    ", count_down)
            # The window is a match only if every required count reached zero.
            all_right = True
            for k in count_down:
                if count_down[k] != 0:
                    all_right = False
                    break
            if all_right:
                r.append(i)
            i+=1
        return r
def test():
ss = Solution()
s = "barfoothefoobarman"
words = ["foo","bar"]
s = "wordgoodgoodgoodbestword",
words = ["word","good","best","word"]
s= "wordgoodgoodgoodbestword"
words=["word","good","best","good"]
r = ss.findSubstring(s,words)
print(s)
print(words)
print(r)
test()
| 24.415094
| 53
| 0.413447
|
f8097cdd84bf3b05e5fb79e15cb75fa97cb4e77d
| 4,793
|
py
|
Python
|
mogua/wallet/wallet_user_store.py
|
vanthoi/mogua-blockchain
|
1e46ee2ee4fc98b87aede276608b3bd95971f05a
|
[
"Apache-2.0"
] | 16
|
2021-08-01T14:29:14.000Z
|
2022-02-09T04:32:05.000Z
|
mogua/wallet/wallet_user_store.py
|
vanthoi/mogua-blockchain
|
1e46ee2ee4fc98b87aede276608b3bd95971f05a
|
[
"Apache-2.0"
] | 18
|
2021-08-03T22:07:27.000Z
|
2022-02-03T11:08:42.000Z
|
mogua/wallet/wallet_user_store.py
|
vanthoi/mogua-blockchain
|
1e46ee2ee4fc98b87aede276608b3bd95971f05a
|
[
"Apache-2.0"
] | 5
|
2021-09-13T10:23:35.000Z
|
2022-03-15T08:43:19.000Z
|
from typing import List, Optional
import aiosqlite
from mogua.util.db_wrapper import DBWrapper
from mogua.util.ints import uint32
from mogua.wallet.util.wallet_types import WalletType
from mogua.wallet.wallet_info import WalletInfo
class WalletUserStore:
"""
WalletUserStore keeps track of all user created wallets and necessary smart-contract data
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS users_wallets("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_type int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on users_wallets(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS type on users_wallets(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS data on users_wallets(data)")
await self.db_connection.commit()
await self.init_wallet()
return self
async def init_wallet(self):
all_wallets = await self.get_all_wallet_info_entries()
if len(all_wallets) == 0:
await self.create_wallet("MoGua Wallet", WalletType.STANDARD_WALLET, "")
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM users_wallets")
await cursor.close()
await self.db_connection.commit()
async def create_wallet(
self, name: str, wallet_type: int, data: str, id: Optional[int] = None, in_transaction=False
) -> Optional[WalletInfo]:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT INTO users_wallets VALUES(?, ?, ?, ?)",
(id, name, wallet_type, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
return await self.get_last_wallet()
async def delete_wallet(self, id: int, in_transaction: bool):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(f"DELETE FROM users_wallets where id={id}")
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def update_wallet(self, wallet_info: WalletInfo, in_transaction):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT or REPLACE INTO users_wallets VALUES(?, ?, ?, ?)",
(
wallet_info.id,
wallet_info.name,
wallet_info.type,
wallet_info.data,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_last_wallet(self) -> Optional[WalletInfo]:
cursor = await self.db_connection.execute("SELECT MAX(id) FROM users_wallets;")
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return await self.get_wallet_by_id(row[0])
async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
"""
Return a set containing all wallets
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets")
rows = await cursor.fetchall()
await cursor.close()
result = []
for row in rows:
result.append(WalletInfo(row[0], row[1], row[2], row[3]))
return result
async def get_wallet_by_id(self, id: int) -> Optional[WalletInfo]:
"""
Return a wallet by id
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletInfo(row[0], row[1], row[2], row[3])
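# Hedged usage sketch: exercises the store above against an in-memory SQLite
# database. It assumes DBWrapper can be built directly from an aiosqlite
# connection (DBWrapper(connection)); adjust if the real constructor differs.
import asyncio

async def _demo_wallet_user_store():
    connection = await aiosqlite.connect(":memory:")
    store = await WalletUserStore.create(DBWrapper(connection))
    await store.create_wallet("Demo Wallet", WalletType.STANDARD_WALLET, "")
    for info in await store.get_all_wallet_info_entries():
        print(info.id, info.name, info.type)
    await connection.close()

if __name__ == "__main__":
    asyncio.run(_demo_wallet_user_store())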
| 33.055172
| 105
| 0.605049
|
144d6906a79ec7fe56193f50b1f28d3eb97b2e95
| 2,438
|
py
|
Python
|
tests/sensor_proc_test.py
|
raunaqbhirangi/reskin_sensor
|
02b86d26b29ae6abdb5411580291eeac3ae7d272
|
[
"MIT"
] | 31
|
2021-11-01T13:47:24.000Z
|
2022-03-29T08:57:59.000Z
|
tests/sensor_proc_test.py
|
raunaqbhirangi/reskin_sensor
|
02b86d26b29ae6abdb5411580291eeac3ae7d272
|
[
"MIT"
] | 3
|
2021-11-05T15:08:31.000Z
|
2022-01-20T23:16:37.000Z
|
tests/sensor_proc_test.py
|
raunaqbhirangi/reskin_sensor
|
02b86d26b29ae6abdb5411580291eeac3ae7d272
|
[
"MIT"
] | 8
|
2021-11-01T13:48:14.000Z
|
2022-02-25T08:17:56.000Z
|
import argparse
import time
from reskin_sensor import ReSkinProcess
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Test code to run a ReSkin streaming process in the background. Allows data to be collected without code blocking"
)
# fmt: off
parser.add_argument("-p", "--port", type=str, help="port to which the microcontroller is connected", required=True,)
parser.add_argument("-b", "--baudrate", type=str, help="baudrate at which the microcontroller is streaming data", default=115200,)
parser.add_argument("-n", "--num_mags", type=int, help="number of magnetometers on the sensor board", default=5,)
parser.add_argument("-tf", "--temp_filtered", action="store_true", help="flag to filter temperature from sensor output",)
# fmt: on
args = parser.parse_args()
# Create sensor stream
sensor_stream = ReSkinProcess(
num_mags=args.num_mags,
port=args.port,
baudrate=args.baudrate,
burst_mode=True,
device_id=1,
temp_filtered=args.temp_filtered,
)
# Start sensor stream
sensor_stream.start()
time.sleep(0.1)
# Buffer data for two seconds and return buffer
if sensor_stream.is_alive():
sensor_stream.start_buffering()
buffer_start = time.time()
time.sleep(2.0)
sensor_stream.pause_buffering()
buffer_stop = time.time()
# Get buffered data
buffered_data = sensor_stream.get_buffer()
if buffered_data is not None:
print(
"Time elapsed: {}, Number of datapoints: {}".format(
buffer_stop - buffer_start, len(buffered_data)
)
)
# Get a specified number of samples
test_samples = sensor_stream.get_data(num_samples=5)
print(
"Columns: ",
", \t".join(
[
"T{0}, \tBx{0}, \tBy{0}, \tBz{0}".format(ind)
for ind in range(args.num_mags)
]
),
)
for sid, sample in enumerate(test_samples):
print(
"Sample {}: ".format(sid + 1)
+ str(["{:.2f}".format(d) for d in sample.data])
)
# Pause sensor stream
sensor_stream.pause_streaming()
sensor_stream.join()
| 33.861111
| 135
| 0.575062
|
a3a995e59ac038d6aef01cbdcb86db150b25e3c8
| 5,424
|
py
|
Python
|
docs/source/conf.py
|
akx/PhiK
|
7f1dd3ed08b527a95ecb3e8cb973a02616e71d1d
|
[
"Apache-2.0"
] | 92
|
2018-12-28T14:03:05.000Z
|
2022-03-23T16:56:05.000Z
|
docs/source/conf.py
|
akx/PhiK
|
7f1dd3ed08b527a95ecb3e8cb973a02616e71d1d
|
[
"Apache-2.0"
] | 34
|
2019-06-19T16:17:17.000Z
|
2022-03-25T08:20:04.000Z
|
docs/source/conf.py
|
akx/PhiK
|
7f1dd3ed08b527a95ecb3e8cb973a02616e71d1d
|
[
"Apache-2.0"
] | 24
|
2018-12-18T16:41:18.000Z
|
2022-03-05T11:25:07.000Z
|
# -*- coding: utf-8 -*-
#
# PhiK documentation build configuration file for sphinx.
#
#
import os
#from unittest.mock import MagicMock
import phik
# Classes that use non-python modules are not always available in the
# RTD environment. By mocking them we can still import these classes
# in the code and RTD can subsequently go through the code and get
# the docstrings.
#class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return MagicMock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath(''))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Phi_K correlation library'
copyright = '2018, KPMG Advisory N.V.'
author = 'KPMG Advanced Analytics & Big Data team'
version = phik.version.full_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*test*', 'phik.tutorials.*']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PhiKdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PhiK.tex', 'PhiK Documentation',
'KPMG Advanced Analytics & Big Data team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'phik', 'PhiK Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PhiK', 'PhiK Documentation',
author, 'PhiK', 'One line description of project.',
'Miscellaneous'),
]
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
| 31.352601
| 98
| 0.683813
|
850c986a216092aaba51cc01a3713e13c5066b7c
| 441
|
py
|
Python
|
chapter2_1.py
|
LuGuo25/Python
|
8ea59fb1ab2ac88a765816d77acc676365521940
|
[
"Apache-2.0"
] | 1
|
2021-05-18T10:53:11.000Z
|
2021-05-18T10:53:11.000Z
|
chapter2_1.py
|
LuGuo25/Python
|
8ea59fb1ab2ac88a765816d77acc676365521940
|
[
"Apache-2.0"
] | null | null | null |
chapter2_1.py
|
LuGuo25/Python
|
8ea59fb1ab2ac88a765816d77acc676365521940
|
[
"Apache-2.0"
] | null | null | null |
s="hello,python" \
"wow"#定义字符串类型变量 \为转义符
print(s)
print(s[1:3])#字符串切片 区间左闭右开 同时也说明字符串s的第一个字母对应数字“0”
print(s[1-3])#这里的1-3是做减法1-3=-2,即取字符串s的倒数第二个字母
num1=50#整数
num2=3.14#浮点型
num3=1+2j#复数
num4=123e5#科学计数法
print(int(num4))#输出num4,num4的类型为浮点型
n=None#空类型
print(num1!=num2 or num2==num1)#输出判断结果#not优先级大于or和and
p="hello world"
#p=20#不同于VB,Python可以任意更改变量类型
s1=0O27#赋值任意一个八进制数
s2=0b100110100#二进制
s3=0xA21#十六进制
print(s1)
print(s2)
print(s3)#输出各个进制数
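# Expected output of the script above, worked out from the literals:
#   hello,pythonwow
#   el
#   o
#   12300000
#   True
#   23
#   308
#   2593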
| 19.173913
| 53
| 0.75737
|
135f40e5568764fa0d9676e3c23a5d0d52391d60
| 13,419
|
py
|
Python
|
models/single_stream/pretrain_with_mlm.py
|
codezakh/ALBEF
|
16aee1da1b7682afcd5a5f1ded74fc8dc199a8cf
|
[
"BSD-3-Clause"
] | null | null | null |
models/single_stream/pretrain_with_mlm.py
|
codezakh/ALBEF
|
16aee1da1b7682afcd5a5f1ded74fc8dc199a8cf
|
[
"BSD-3-Clause"
] | 2
|
2022-02-02T12:55:59.000Z
|
2022-02-17T14:39:19.000Z
|
models/single_stream/pretrain_with_mlm.py
|
codezakh/ALBEF
|
16aee1da1b7682afcd5a5f1ded74fc8dc199a8cf
|
[
"BSD-3-Clause"
] | null | null | null |
from functools import partial
from models.vit import VisionTransformer, interpolate_pos_embed
# from models.xbert import BertConfig, BertForMaskedLM
from models.xbert import BertConfig, BertModel, BertForMaskedLM
from typing import Dict
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
import random
class ALBEF(nn.Module):
def __init__(self,
text_encoder: str = None,
tokenizer = None,
config: Dict = None,
temp: float = 0.07,
init_deit = True
):
super().__init__()
self.tokenizer = tokenizer
self.mlm_probability = config['mlm_probability']
embed_dim = config['embed_dim']
self.visual_encoder = VisionTransformer(
img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12,
mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
if init_deit:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True)
state_dict = checkpoint["model"]
pos_embed_reshaped = interpolate_pos_embed(state_dict['pos_embed'], self.visual_encoder)
state_dict['pos_embed'] = pos_embed_reshaped
msg = self.visual_encoder.load_state_dict(state_dict,strict=False)
print(msg)
# vision_width = config['vision_width']
bert_config = BertConfig.from_json_file(config['bert_config'])
# self.text_encoder = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config)
self.text_encoder = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config)
# for param in self.text_encoder.embeddings.word_embeddings.parameters():
# param.requires_grad = False
text_width = self.text_encoder.config.hidden_size
# self.vision_proj = nn.Linear(vision_width, embed_dim)
# self.text_proj = nn.Linear(text_width, embed_dim)
# self.temp = nn.Parameter(torch.ones([]) * config['temp'])
self.queue_size = config['queue_size']
self.momentum = config['momentum']
self.itm_head = nn.Linear(text_width, 2)
# create momentum models
# self.visual_encoder_m = VisionTransformer(
# img_size=config['image_res'], patch_size=16, embed_dim=768, depth=12, num_heads=12,
# mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6))
# self.vision_proj_m = nn.Linear(vision_width, embed_dim)
# self.text_encoder_m = BertForMaskedLM.from_pretrained(text_encoder, config=bert_config)
# self.text_encoder_m = BertModel.from_pretrained(text_encoder, config=bert_config, add_pooling_layer=False)
# self.text_proj_m = nn.Linear(text_width, embed_dim)
# self.model_pairs = [[self.visual_encoder,self.visual_encoder_m],
# [self.vision_proj,self.vision_proj_m],
# [self.text_encoder,self.text_encoder_m],
# [self.text_proj,self.text_proj_m],
# ]
# self.copy_params()
# create the queue
# self.register_buffer("image_queue", torch.randn(embed_dim, self.queue_size))
# self.register_buffer("text_queue", torch.randn(embed_dim, self.queue_size))
# self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
# self.image_queue = nn.functional.normalize(self.image_queue, dim=0)
# self.text_queue = nn.functional.normalize(self.text_queue, dim=0)
def make_sentence_pair(self, text_token_ids, text_attn_mask, image_embeds, image_atts, device):
text_token_ids = text_token_ids.clone()
with torch.no_grad():
text_token_ids[:, 0] = self.tokenizer.sep_token_id
# Create the [CLS] prefix for the visual token.
# prefix = torch.zeros(image_embeds.shape[0], 1).to(image.device) * self.tokenizer.cls_token_id
# prefix = prefix.long()
# prefix_embeds = self.text_encoder.bert.embeddings.word_embeddings(prefix)
# Get the word embeddings for language.
word_embeddings = self.text_encoder.bert.embeddings.word_embeddings(text_token_ids)
# Concatenate it all to make the input sentence.
mm_model_input = torch.cat([image_embeds, word_embeddings], dim=1)
# Create the attention mask for the combined sentence.
imtext_attention_mask = torch.cat([image_atts, text_attn_mask], dim=1)
# Get the token_type_ids.
# Following the BERT convention, the token_type_ids for the first sentence is 0,
# and the second sentence is 1. To achieve this, we can simply concatenate the attention mask
# of the text with a zero tensor.
text_token_type_ids = text_attn_mask.clone()
with torch.no_grad():
text_token_type_ids[:, 0] = 0 # the [SEP] between the sentences is grouped with sentence A (token type 0).
token_type_ids = torch.cat([torch.zeros_like(image_atts).to(device), text_token_type_ids], dim=1)
return mm_model_input, imtext_attention_mask, token_type_ids
def forward(self, image, text, alpha=0):
# with torch.no_grad():
# self.temp.clamp_(0.001,0.5)
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
mm_pos_words, mm_pos_att_mask, mm_pos_token_type_ids = self.make_sentence_pair(
text.input_ids,
text.attention_mask,
image_embeds,
image_atts,
image.device
)
output_pos = self.text_encoder.bert(
inputs_embeds=mm_pos_words,
attention_mask=mm_pos_att_mask,
token_type_ids=mm_pos_token_type_ids,
return_dict = True,
mode = 'text'
)
with torch.no_grad():
bs = image.size(0)
weights_i2t = torch.ones(bs, bs).to(image.device)
weights_t2i = torch.ones(bs, bs).to(image.device)
weights_i2t.fill_diagonal_(0)
weights_t2i.fill_diagonal_(0)
# select a negative image for each text
image_embeds_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
image_embeds_neg.append(image_embeds[neg_idx])
image_embeds_neg = torch.stack(image_embeds_neg,dim=0)
# select a negative text for each image
text_tokens_neg = []
text_att_masks_neg = []
for b in range(bs):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_tokens_neg.append(text.input_ids[neg_idx])
text_att_masks_neg.append(text.attention_mask[neg_idx])
text_tokens_neg = torch.stack(text_tokens_neg,dim=0)
text_att_masks_neg = torch.stack(text_att_masks_neg,dim=0)
text_tokens_all = torch.cat([text.input_ids, text_tokens_neg],dim=0)
text_att_masks_all = torch.cat([text.attention_mask, text_att_masks_neg],dim=0)
image_embeds_all = torch.cat([image_embeds_neg,image_embeds],dim=0)
image_atts_all = torch.cat([image_atts,image_atts],dim=0)
mm_neg_words, mm_neg_att_mask, mm_neg_token_type_ids = self.make_sentence_pair(
text_tokens_all,
text_att_masks_all,
image_embeds_all,
image_atts_all,
image.device
)
output_neg= self.text_encoder.bert(
inputs_embeds=mm_neg_words,
attention_mask=mm_neg_att_mask,
token_type_ids=mm_neg_token_type_ids,
return_dict = True,
mode = 'text'
)
vl_embeddings = torch.cat([output_pos.last_hidden_state[:,0,:], output_neg.last_hidden_state[:,0,:]],dim=0)
vl_output = self.itm_head(vl_embeddings)
itm_labels = torch.cat([torch.ones(bs,dtype=torch.long),torch.zeros(2*bs,dtype=torch.long)],
dim=0).to(image.device)
loss_itm = F.cross_entropy(vl_output, itm_labels)
##================= MLM ========================##
input_ids = text.input_ids.clone()
labels = input_ids.clone()
probability_matrix = torch.full(labels.shape, self.mlm_probability)
input_ids, labels = self.mask(input_ids, self.text_encoder.config.vocab_size, image.device, targets=labels,
probability_matrix = probability_matrix)
with torch.no_grad():
logits_m = self.text_encoder(input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
return_logits = True,
)
mlm_output = self.text_encoder(input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
labels = labels,
soft_labels = F.softmax(logits_m,dim=-1),
alpha = alpha
)
loss_mlm = mlm_output.loss
return loss_itm, loss_mlm
@torch.no_grad()
def copy_params(self):
for model_pair in self.model_pairs:
for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
param_m.data.copy_(param.data) # initialize
param_m.requires_grad = False # not update by gradient
@torch.no_grad()
def _momentum_update(self):
for model_pair in self.model_pairs:
for param, param_m in zip(model_pair[0].parameters(), model_pair[1].parameters()):
param_m.data = param_m.data * self.momentum + param.data * (1. - self.momentum)
@torch.no_grad()
def _dequeue_and_enqueue(self, image_feat, text_feat):
# gather keys before updating queue
image_feats = concat_all_gather(image_feat)
text_feats = concat_all_gather(text_feat)
batch_size = image_feats.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.image_queue[:, ptr:ptr + batch_size] = image_feats.T
self.text_queue[:, ptr:ptr + batch_size] = text_feats.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr
def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
if masked_indices is None:
masked_indices = torch.bernoulli(probability_matrix).bool()
masked_indices[input_ids == self.tokenizer.pad_token_id] = False
masked_indices[input_ids == self.tokenizer.cls_token_id] = False
if targets is not None:
targets[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
input_ids[indices_replaced] = self.tokenizer.mask_token_id
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
input_ids[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
if targets is not None:
return input_ids, targets
else:
return input_ids
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
| 46.272414
| 122
| 0.601013
|
5ffaab595d87f46c07038a477fa0459db4c995f6
| 3,230
|
py
|
Python
|
fuzzy_toolbox/core.py
|
jdvelasq/pyfuzzy
|
4b8c5948f5d05202ec914a60e2bd420133a57e90
|
[
"MIT"
] | null | null | null |
fuzzy_toolbox/core.py
|
jdvelasq/pyfuzzy
|
4b8c5948f5d05202ec914a60e2bd420133a57e90
|
[
"MIT"
] | null | null | null |
fuzzy_toolbox/core.py
|
jdvelasq/pyfuzzy
|
4b8c5948f5d05202ec914a60e2bd420133a57e90
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
def format_plot(title=None, view_xaxis=True, view_yaxis=False):
plt.gca().set_ylim(-0.05, 1.05)
plt.gca().spines["bottom"].set_visible(True)
plt.gca().spines["left"].set_visible(False)
plt.gca().spines["right"].set_visible(False)
plt.gca().spines["top"].set_visible(False)
plt.gca().spines["bottom"].set_color("gray")
plt.gca().get_yaxis().set_visible(False)
if view_yaxis == "left":
plt.gca().get_yaxis().set_visible(True)
if view_yaxis == "right":
plt.gca().get_yaxis().set_visible(True)
plt.gca().yaxis.tick_right()
plt.gca().get_xaxis().set_visible(view_xaxis)
if title is not None:
plt.gca().set_title(title)
def plot_fuzzyvariable(
universe, memberships, labels, title, fmt, linewidth, view_xaxis, view_yaxis
):
#
for label, membership in zip(labels, memberships):
plt.gca().plot(universe, membership, fmt, label=label, linewidth=linewidth)
plt.gca().legend()
#
format_plot(
title=title,
view_xaxis=view_xaxis,
view_yaxis=view_yaxis,
)
# plt.gca().spines["left"].set_color("lightgray")
def plot_crisp_input(
value, universe, membership, name, view_xaxis=True, view_yaxis="left"
):
plt.gca().plot(universe, membership, "-k", linewidth=1)
membership_value = np.interp(
x=value,
xp=universe,
fp=membership,
)
membership = np.where(membership <= membership_value, membership, membership_value)
plt.gca().fill_between(universe, membership, color="gray", alpha=0.7)
if name is None:
title = None
else:
title = "{} = {}".format(name, value)
format_plot(
title=title,
view_xaxis=view_xaxis,
view_yaxis=view_yaxis,
)
plt.gca().vlines(x=value, ymin=-0.0, ymax=1.0, color="red", linewidth=2)
def plot_fuzzy_input(
value, universe, membership, name, view_xaxis=True, view_yaxis="left"
):
plt.gca().plot(universe, membership, "-k", linewidth=1)
plt.gca().fill_between(universe, value, color="gray", alpha=0.7)
format_plot(
title=name,
view_xaxis=view_xaxis,
view_yaxis=view_yaxis,
)
def apply_modifiers(membership, modifiers):
def slightly(u):
plus_u = np.power(u, 1.25)
not_very_u = 1 - np.power(u, 2)
u = np.where(u < not_very_u, plus_u, not_very_u)
u = u / np.max(u)
u = np.where(u <= 0.5, u ** 2, 1 - 2 * (1 - u) ** 2)
return u
fn = {
"VERY": lambda u: np.power(u, 2),
"SOMEWHAT": lambda u: np.power(u, 1.0 / 3.0),
"MORE_OR_LESS": lambda u: np.power(u, 0.5),
"EXTREMELY": lambda u: np.power(u, 3),
"PLUS": lambda u: np.power(u, 1.25),
"INTENSIFY": lambda u: np.where(
u <= 0.5, np.power(u, 2), 1 - 2 * np.power(1 - u, 2)
),
"NORM": lambda u: u / np.max(u),
"NOT": lambda u: 1 - u,
"SLIGHTLY": lambda u: slightly(u),
}
membership = membership.copy()
modifiers = modifiers.copy()
modifiers.reverse()
for modifier in modifiers:
membership = fn[modifier](membership)
return membership
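# Hedged usage sketch for apply_modifiers: modifiers are applied right-to-left,
# so ["NOT", "VERY"] computes NOT(VERY(u)) = 1 - u**2. The universe and the ramp
# membership below are invented for illustration only.
if __name__ == "__main__":
    universe = np.linspace(0, 10, 11)
    warm = np.clip((universe - 5.0) / 5.0, 0.0, 1.0)
    print(np.round(apply_modifiers(warm, ["VERY"]), 3))         # warm ** 2
    print(np.round(apply_modifiers(warm, ["NOT", "VERY"]), 3))  # 1 - warm ** 2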
| 26.916667
| 87
| 0.603715
|
806fea77fbd229ab66fcb8986b98387ad03a9872
| 13,272
|
py
|
Python
|
mgz/model/__init__.py
|
happyleavesaoc/mgz
|
e59e6596268b041f1b5e308b30c736f951116358
|
[
"MIT"
] | null | null | null |
mgz/model/__init__.py
|
happyleavesaoc/mgz
|
e59e6596268b041f1b5e308b30c736f951116358
|
[
"MIT"
] | null | null | null |
mgz/model/__init__.py
|
happyleavesaoc/mgz
|
e59e6596268b041f1b5e308b30c736f951116358
|
[
"MIT"
] | null | null | null |
"""Convert parsed data into object-oriented model."""
import codecs
import collections
import collections.abc
import _hashlib
import hashlib
from datetime import timedelta, datetime
from enum import Enum
import dataclasses
from mgz import fast
from mgz.reference import get_consts, get_dataset
from mgz.fast import Action as ActionEnum
from mgz.fast.header import parse
from mgz.model.definitions import *
from mgz.model.inputs import Inputs
from mgz.common.chat import parse_chat, Chat as ChatEnum
from mgz.common.diplomacy import get_diplomacy_type
from mgz.common.map import get_map_data
from mgz.util import Version
TC_IDS = [71, 109, 141, 142]
def enrich_action(action, action_data, dataset, consts):
"""Enrich action data with lookups."""
if 'x' in action_data and 'y' in action_data:
action.position = Position(action_data['x'], action_data['y'])
del action.payload['x']
del action.payload['y']
if 'technology_id' in action_data:
action.payload['technology'] = dataset['technologies'].get(str(action_data['technology_id']))
if 'formation_id' in action_data:
action.payload['formation'] = consts['formations'].get(str(action_data['formation_id']))
if 'stance_id' in action_data:
action.payload['stance'] = consts['stances'].get(str(action_data['stance_id']))
if 'building_id' in action_data:
action.payload['building'] = dataset['objects'].get(str(action_data['building_id']))
if 'unit_id' in action_data:
action.payload['unit'] = dataset['objects'].get(str(action_data['unit_id']))
if 'command_id' in action_data:
action.payload['command'] = consts['commands'].get(str(action_data['command_id']))
if 'order_id' in action_data:
action.payload['order'] = consts['orders'].get(str(action_data['order_id']))
if 'resource_id' in action_data:
action.payload['resource'] = consts['resources'].get(str(action_data['resource_id']))
def get_difficulty(data):
if data['version'] is Version.HD:
return data['hd']['difficulty_id']
elif data['version'] is Version.DE:
return data['de']['difficulty_id']
return data['scenario']['difficulty_id']
def get_team_together(data):
if data['version'] is Version.DE:
return data['de']['team_together']
return None
def get_lock_speed(data):
if data['version'] is Version.DE:
return data['de']['lock_speed']
return None
def get_all_technologies(data):
if data['version'] is Version.DE:
return data['de']['all_technologies']
return None
def get_starting_age(data):
if data['version'] is Version.DE:
return data['de']['starting_age_id']
return None
def get_hash(data):
if data['version'] is Version.DE:
return data['de']['hash']
return None
def parse_match(handle):
"""Parse a match.
This is one big function because the dependency graph between
the variables is dense.
"""
data = parse(handle)
body_pos = handle.tell() - 4 # log version
consts = get_consts()
dataset_id, dataset = get_dataset(data['version'], data['mod'])
map_id = data['hd']['map_id'] if data['version'] is Version.HD else data['scenario']['map_id']
try:
map_data, encoding, language = get_map_data(
map_id,
data['scenario']['instructions'],
data['map']['dimension'],
data['version'],
dataset_id,
dataset,
data['map']['tiles'],
de_seed=data['lobby']['seed']
)
except ValueError:
raise RuntimeError("could not get map data")
# Handle DE-specific data
if data['de']:
de_players = {player['number']: player for player in data['de']['players']}
lobby = data['de']['lobby']
guid = data['de']['guid']
else:
de_players = dict()
lobby = None
guid = None
# Parse gaia objects
gaia = [
Object(
dataset['objects'].get(str(obj['object_id'])),
obj['class_id'],
obj['object_id'],
obj['instance_id'],
obj['index'],
Position(obj['position']['x'], obj['position']['y'])
)
for obj in data['players'][0]['objects']
]
inputs = Inputs({o.instance_id:o.name for o in gaia})
# Parse players
players = dict()
allies = dict()
for player in data['players'][1:]:
allies[player['number']] = set([player['number']])
for i, stance in enumerate(player['diplomacy']):
if stance == 2:
allies[player['number']].add(i)
de_player = de_players.get(player['number'])
if de_player:
player.update(de_player)
pos_x = None
pos_y = None
for obj in player['objects']:
if obj['object_id'] in TC_IDS:
pos_x = obj['position']['x']
pos_y = obj['position']['y']
players[player['number']] = Player(
player['number'],
player['name'].decode(encoding),
consts['player_colors'][str(player['color_id'])],
player['color_id'],
dataset['civilizations'][str(player['civilization_id'])]['name'],
player['civilization_id'],
Position(pos_x, pos_y),
[
Object(
dataset['objects'].get(str(obj['object_id'])),
obj['class_id'],
obj['object_id'],
obj['instance_id'],
obj['index'],
Position(obj['position']['x'], obj['position']['y'])
)
for obj in player['objects']
],
player.get('profile_id'),
player.get('prefer_random')
)
# Assign teams
if de_players:
by_team = collections.defaultdict(list)
for number, player in de_players.items():
if player['team_id'] > 1:
by_team[player['team_id']].append(number)
elif player['team_id'] == 1:
by_team[number + 9].append(number)
team_ids = by_team.values()
else:
team_ids = set([frozenset(s) for s in allies.values()])
teams = []
for team in team_ids:
t = [players[x] for x in team]
for x in team:
players[x].team = t
teams.append(t)
# Compute diplomacy
diplomacy_type = get_diplomacy_type(teams, players)
# Extract lobby chat
pd = [dict(name=p.name, number=n) for n, p in players.items()]
chats = []
for c in data['lobby']['chat']:
chat = parse_chat(c, encoding, 0, pd, diplomacy_type, 'lobby')
if chat['player_number'] not in players:
continue
chats.append(Chat(
timedelta(milliseconds=chat['timestamp']),
chat['message'],
chat['origination'],
chat['audience'],
players[chat['player_number']]
))
inputs.add_chat(chats[-1])
# Parse player actions
fast.meta(handle)
timestamp = 0
resigned = []
actions = []
viewlocks = []
last_viewlock = None
while True:
try:
op_type, op_data = fast.operation(handle)
if op_type is fast.Operation.SYNC:
timestamp += op_data[0]
elif op_type is fast.Operation.VIEWLOCK:
if op_data == last_viewlock:
continue
viewlock = Viewlock(timedelta(milliseconds=timestamp), Position(*op_data), players[data['metadata']['owner_id']])
viewlocks.append(viewlock)
last_viewlock = op_data
elif op_type is fast.Operation.CHAT:
chat = parse_chat(op_data, encoding, timestamp, pd, diplomacy_type, 'game')
if chat['type'] == ChatEnum.MESSAGE:
chats.append(Chat(
timedelta(milliseconds=chat['timestamp'] + data['map']['restore_time']),
chat['message'],
chat['origination'],
chat['audience'],
players[chat['player_number']]
))
inputs.add_chat(chats[-1])
elif op_type is fast.Operation.ACTION:
action_type, action_data = op_data
action = Action(timedelta(milliseconds=timestamp), action_type, action_data)
if action_type is fast.Action.RESIGN:
resigned.append(players[action_data['player_id']])
if 'player_id' in action_data and action_data['player_id'] in players:
action.player = players[action_data['player_id']]
del action.payload['player_id']
enrich_action(action, action_data, dataset, consts)
actions.append(action)
inputs.add_action(action)
except EOFError:
break
# Compute winner(s)
for team in teams:
winner = not any([player for player in team if player in resigned])
if resigned:
for player in team:
player.winner = winner
handle.seek(body_pos)
file_bytes = handle.read()
file_size = body_pos + 4 + len(file_bytes)
file_hash = hashlib.sha1(file_bytes).hexdigest()
return Match(
list(players.values()),
teams,
gaia,
Map(
map_id,
map_data['name'],
map_data['dimension'],
consts['map_sizes'][str(map_data['dimension'])],
map_data['custom'],
map_data['seed'],
map_data['name'].startswith('ZR@'),
map_data['modes'],
[
Tile(
tile['terrain_id'],
tile['elevation'],
Position(tile['x'], tile['y'])
) for tile in map_data['tiles']
]
),
File(
codecs.lookup(encoding),
language,
file_hash,
file_size,
players[data['metadata']['owner_id']],
viewlocks
),
data['map']['restore_time'] > 0,
timedelta(milliseconds=data['map']['restore_time']),
consts['speeds'][str(int(round(data['metadata']['speed'], 2) * 100))],
int(round(data['metadata']['speed'], 2) * 100),
data['metadata']['cheats'],
data['lobby']['lock_teams'],
data['lobby']['population'],
chats,
guid,
lobby,
dataset['dataset']['name'],
consts['game_types'][str(data['lobby']['game_type_id'])],
data['lobby']['game_type_id'],
consts['map_reveal_choices'][str(data['lobby']['reveal_map_id'])],
data['lobby']['reveal_map_id'],
consts['difficulties'][str(get_difficulty(data))],
get_difficulty(data),
consts['starting_ages'].get(str(get_starting_age(data))),
get_starting_age(data),
get_team_together(data),
get_lock_speed(data),
get_all_technologies(data),
True if data['version'] is Version.DE else None,
timedelta(milliseconds=timestamp + data['map']['restore_time']),
diplomacy_type,
bool(resigned),
data['version'],
data['game_version'],
data['save_version'],
data['log_version'],
data['de']['build'] if data['version'] is Version.DE else None,
datetime.fromtimestamp(data['de']['timestamp']) if data['version'] is Version.DE and data['de']['timestamp'] else None,
timedelta(seconds=data['de']['spec_delay']) if data['version'] is Version.DE else None,
data['de']['allow_specs'] if data['version'] is Version.DE else None,
data['de']['hidden_civs'] if data['version'] is Version.DE else None,
data['de']['visibility_id'] == 2 if data['version'] is Version.DE else None,
get_hash(data),
actions,
inputs.inputs
)
def serialize(obj):
"""Serialize model.
Returns a nested datastructure with no circular references,
appropriate for dumping to JSON, YAML, etc.
"""
seen = set()
def impl(obj):
"""Recursive serialization implementation."""
if dataclasses.is_dataclass(obj) and isinstance(obj, collections.abc.Hashable):
if obj in seen:
return hash(obj)
seen.add(obj)
if type(obj) is list:
return [v for v in [impl(o) for o in obj] if v is not None]
elif type(obj) is dict:
return {k:v for k, v in {f:impl(d) for f, d in obj.items()}.items() if v is not None}
elif dataclasses.is_dataclass(obj):
return {k:v for k, v in {f.name:impl(getattr(obj, f.name)) for f in dataclasses.fields(obj)}.items() if v is not None}
elif isinstance(obj, (codecs.CodecInfo, Enum)):
return obj.name
elif isinstance(obj, timedelta):
return str(obj)
elif isinstance(obj, datetime):
return str(obj)
elif isinstance(obj, bytes):
return None
elif isinstance(obj, _hashlib.HASH):
return obj.hexdigest()
else:
return obj
return impl(obj)
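# Hedged usage sketch: parse a recorded game file and dump the serialized model
# as JSON. "example_recording.mgz" is a placeholder path, not a file that ships
# with this module.
if __name__ == "__main__":
    import json

    with open("example_recording.mgz", "rb") as handle:
        match = parse_match(handle)
    print(json.dumps(serialize(match), indent=2)[:1000])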
| 35.392
| 130
| 0.568942
|
ef1be1c73ffb7384d1935fc37b9a23d945b08f64
| 3,728
|
py
|
Python
|
pyatv/mrp/protobuf/SetDiscoveryModeMessage_pb2.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | null | null | null |
pyatv/mrp/protobuf/SetDiscoveryModeMessage_pb2.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | null | null | null |
pyatv/mrp/protobuf/SetDiscoveryModeMessage_pb2.py
|
acheronfail/pyatv
|
9cb96ffcc49938c4b43c92b7b40ddcecae37e732
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/SetDiscoveryModeMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pyatv/mrp/protobuf/SetDiscoveryModeMessage.proto',
package='',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n0pyatv/mrp/protobuf/SetDiscoveryModeMessage.proto\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\"9\n\x17SetDiscoveryModeMessage\x12\x0c\n\x04mode\x18\x01 \x01(\x05\x12\x10\n\x08\x66\x65\x61tures\x18\x02 \x01(\x05:K\n\x17setDiscoveryModeMessage\x12\x10.ProtocolMessage\x18R \x01(\x0b\x32\x18.SetDiscoveryModeMessage'
,
dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,])
SETDISCOVERYMODEMESSAGE_FIELD_NUMBER = 82
setDiscoveryModeMessage = _descriptor.FieldDescriptor(
name='setDiscoveryModeMessage', full_name='setDiscoveryModeMessage', index=0,
number=82, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_SETDISCOVERYMODEMESSAGE = _descriptor.Descriptor(
name='SetDiscoveryModeMessage',
full_name='SetDiscoveryModeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='mode', full_name='SetDiscoveryModeMessage.mode', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='features', full_name='SetDiscoveryModeMessage.features', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=94,
serialized_end=151,
)
DESCRIPTOR.message_types_by_name['SetDiscoveryModeMessage'] = _SETDISCOVERYMODEMESSAGE
DESCRIPTOR.extensions_by_name['setDiscoveryModeMessage'] = setDiscoveryModeMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SetDiscoveryModeMessage = _reflection.GeneratedProtocolMessageType('SetDiscoveryModeMessage', (_message.Message,), {
'DESCRIPTOR' : _SETDISCOVERYMODEMESSAGE,
'__module__' : 'pyatv.mrp.protobuf.SetDiscoveryModeMessage_pb2'
# @@protoc_insertion_point(class_scope:SetDiscoveryModeMessage)
})
_sym_db.RegisterMessage(SetDiscoveryModeMessage)
setDiscoveryModeMessage.message_type = _SETDISCOVERYMODEMESSAGE
pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(setDiscoveryModeMessage)
# @@protoc_insertion_point(module_scope)
| 40.967033
| 334
| 0.804721
|
ff4265f4c34957c1695e94a6e6a1677cc32248b0
| 4,401
|
py
|
Python
|
dazhongdianping/shop_spider2.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | 9
|
2018-08-28T07:53:43.000Z
|
2019-07-09T07:55:52.000Z
|
dazhongdianping/shop_spider2.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | null | null | null |
dazhongdianping/shop_spider2.py
|
mannuan/pyspider_script
|
f4c988912e1099eacd0322b4e9c3a87eaaaa526f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Created on 2017-12-07 22:55:40
# Project: sdad
from pyspider.libs.base_handler import *
import json,pymysql,time
class Handler(BaseHandler):
crawl_config = {
"headers" : {
'Content-Type': 'application/json; charset=utf-8',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13C75 Safari/601.1',
}
}
@every(minutes=24 * 60)
def on_start(self):
limit = 50
cityid=3602
for i in range(120):
start = i*limit
url = 'http://m.dianping.com/isoapi/module'
url += "?start={}&cityid={}".format(start,cityid)
data = {"moduleInfoList":[{"moduleName":"mapiSearch","query":{"search":{"start":start,"limit":limit,"cityid":cityid},"loaders":["list"]}}],"pageEnName":"shopList"}
data = json.dumps(data)
self.crawl(url, method='POST', data=data, callback=self.index_page)
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
ob_json = response.json
list_shops = ob_json.get('data').get('moduleInfoList')[0].get('moduleData').get('data')
if list_shops is None:
return
else:
if list_shops.get('listData') is None:
return
else:
list_shops = list_shops.get('listData').get('list')
for shop in list_shops: # iterate over the shops
authorityLabelType = shop.get('authorityLabelType') # int
branchName = shop.get('branchName') # branch the shop belongs to
categoryId = shop.get('categoryId') # cuisine category id, int
categoryName = shop.get('categoryName') # cuisine category
cityId = shop.get('cityId') # id of the city the shop is in, int
defaultPic = shop.get('defaultPic') # shop cover picture
dishtags = shop.get('dishtags') # dish tags
id = shop.get('id') # shop id, int
matchText = shop.get('matchText') # shop match text
name = shop.get('name') # shop name
newShop = str(shop.get('newShop')) # boolean
orderDish = str(shop.get('orderDish')) # boolean
priceText = shop.get('priceText') # average price of the shop
regionName = shop.get('regionName') # administrative district of the shop
reviewCount = shop.get('reviewCount') # number of reviews, int
scoreText = shop.get('scoreText')
shopPower = shop.get('shopPower') # shop rating (out of 50), int
shopType = shop.get('shopType') # shop type, int
status = shop.get('status') # shop status, int
tagList = shop.get('tagList')
if tagList is None:
tag = None
else:
tag_list = list()
for tag in tagList:
tag_list.append(tag.get('text'))
tag = ''
for tl in tag_list:
tag += tl+','
result = []
result.extend([authorityLabelType,branchName,categoryId,categoryName,cityId,defaultPic,dishtags,id,matchText,name,newShop,orderDish,priceText,regionName,reviewCount,scoreText,shopPower,shopType,status,tag])
# print id
self.crawl('http://m.dianping.com/shop/{}/map'.format(id), fetch_type='js', save={'result':result}, callback=self.detail_page)
@config(priority=2)
def detail_page(self, response):
obj = response.text.split('window.PAGE_INITIAL_STATE = ')[1].split(';\n </script>')[0]
address = json.loads(obj).get('_context').get('pageInitData').get('address')
crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) # crawl timestamp
result = [address,crawl_time]
result.extend(response.save['result'])
return result
def on_result(self, result):
if not result:
return
conn = pymysql.connect(host='127.0.0.1', port=3306, user='repository', passwd='repository', db='repository',charset='utf8mb4')
cur = conn.cursor()
try:
sql = 'REPLACE INTO dazhongdianping_shop values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
# insert the scraped record
cur.execute(sql,result)
conn.commit()
except Exception as e:
print(e)
conn.rollback()
# release the database connection
if cur:
cur.close()
if conn:
conn.close()
| 42.728155
| 218
| 0.556464
|
09684fb78a7587290512184e92a535ec3d14af3a
| 426
|
py
|
Python
|
inventory/templatetags/indirect.py
|
Eising/viconf
|
56b80e340a173dcba013e2c4f6568a1407d418a2
|
[
"MIT"
] | 3
|
2018-07-13T12:50:37.000Z
|
2018-07-13T22:43:49.000Z
|
inventory/templatetags/indirect.py
|
Eising/viconf
|
56b80e340a173dcba013e2c4f6568a1407d418a2
|
[
"MIT"
] | null | null | null |
inventory/templatetags/indirect.py
|
Eising/viconf
|
56b80e340a173dcba013e2c4f6568a1407d418a2
|
[
"MIT"
] | null | null | null |
from django import template
from util.validators import ViconfValidators
import sys
register = template.Library()
@register.simple_tag
def indirect(variable, key):
return variable[key]
@register.simple_tag
def validatorclass(name):
validators = ViconfValidators.VALIDATORS
if name == 'none':
return ""
if name in validators:
return validators[name]['css_class']
else:
return ""
| 20.285714
| 44
| 0.706573
|
072416802835c87642cf434baf400f34318801f4
| 245
|
py
|
Python
|
cryptocurrency_tracker_plugin/__init__.py
|
BotDevGroup/cryptocurrency_tracker_plugin
|
7d2ce68553daebce34d8a72e26915e2a95a84c50
|
[
"MIT"
] | null | null | null |
cryptocurrency_tracker_plugin/__init__.py
|
BotDevGroup/cryptocurrency_tracker_plugin
|
7d2ce68553daebce34d8a72e26915e2a95a84c50
|
[
"MIT"
] | null | null | null |
cryptocurrency_tracker_plugin/__init__.py
|
BotDevGroup/cryptocurrency_tracker_plugin
|
7d2ce68553daebce34d8a72e26915e2a95a84c50
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = """Ricardo Arturo Cabral Mejia"""
__email__ = 'me@ricardocabral.io'
__version__ = '0.1.0'
from cryptocurrency_tracker_plugin.base import CryptocurrencyTrackerPlugin
plugin = CryptocurrencyTrackerPlugin()
| 22.272727
| 74
| 0.763265
|
fb1c788140d2ec16451dec9cc94ce933756dc5b9
| 9,692
|
py
|
Python
|
tests/test_runner/test_fp16.py
|
jinliwei1997/mmcv
|
f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79
|
[
"Apache-2.0"
] | 3,748
|
2018-10-12T08:39:46.000Z
|
2022-03-31T17:22:55.000Z
|
tests/test_runner/test_fp16.py
|
jinliwei1997/mmcv
|
f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79
|
[
"Apache-2.0"
] | 1,637
|
2018-10-12T06:06:18.000Z
|
2022-03-31T02:20:53.000Z
|
tests/test_runner/test_fp16.py
|
jinliwei1997/mmcv
|
f8d46df4a9fa32fb44d2e92a4ca5e7b26ee9cb79
|
[
"Apache-2.0"
] | 1,234
|
2018-10-12T09:28:20.000Z
|
2022-03-31T15:56:24.000Z
|
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner.fp16_utils import auto_fp16, cast_tensor_type, force_fp32
def test_cast_tensor_type():
inputs = torch.FloatTensor([5.])
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, torch.Tensor)
assert outputs.dtype == dst_type
inputs = 'tensor'
src_type = str
dst_type = str
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, str)
inputs = np.array([5.])
src_type = np.ndarray
dst_type = np.ndarray
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, np.ndarray)
inputs = dict(
tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, dict)
assert outputs['tensor_a'].dtype == dst_type
assert outputs['tensor_b'].dtype == dst_type
inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, list)
assert outputs[0].dtype == dst_type
assert outputs[1].dtype == dst_type
inputs = 5
outputs = cast_tensor_type(inputs, None, None)
assert isinstance(outputs, int)
def test_auto_fp16():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject(object):
@auto_fp16()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@auto_fp16()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
# apply to specified input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
# apply to optional input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
# out_fp32=True
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'), out_fp32=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
def test_force_fp32():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject(object):
@force_fp32()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@force_fp32()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
# apply to specified input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
# apply to optional input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
# out_fp16=True
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'), out_fp16=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
| 32.199336
| 75
| 0.652291
|
8c2ff245c0593cfa9054ce08ec587cbada1a78cd
| 919
|
py
|
Python
|
Category.py
|
Auggen21/Optical-Mark-Reader-using-python
|
95e2efa2fb17ad3e5f3ad3d221f9e3417149b071
|
[
"MIT"
] | null | null | null |
Category.py
|
Auggen21/Optical-Mark-Reader-using-python
|
95e2efa2fb17ad3e5f3ad3d221f9e3417149b071
|
[
"MIT"
] | null | null | null |
Category.py
|
Auggen21/Optical-Mark-Reader-using-python
|
95e2efa2fb17ad3e5f3ad3d221f9e3417149b071
|
[
"MIT"
] | 1
|
2020-08-25T18:56:45.000Z
|
2020-08-25T18:56:45.000Z
|
import cv2
import numpy as np
def category(cat):
cate=""
orginal=np.uint8(cat)
orginal=cv2.resize(orginal,(202,626))
c = ["GEN","OBC1","OBC2","SC","ST","PH"]
h,w = orginal.shape
crop= orginal[140:h-5,140:w-5]
# cv2.imshow('l',crop)
h1,w1 = crop.shape
th, im_th = cv2.threshold(crop,127,255,0)
im_th=~im_th
kernel = np.ones((5,5), np.uint8)
binary = cv2.erode(im_th, kernel, iterations=2)
count = 0
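# Walk down the cropped strip in six equal rows, one per label in c; the row
# whose eroded binary image contains exactly one contour is taken as the marked
# bubble and its label is returned.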
for x in range(0, h1,np.uint(np.floor(h1/6))):
if (x+int(h1/6) > h1):
break
row = binary[x:x+int(h1/6),:]
visr=crop[x:x+int(h1/6),:]
count+=1
# cv2.imshow("Foreground", visr)
# cv2.waitKey(0)
        # OpenCV 3.x findContours returns (image, contours, hierarchy); OpenCV 4.x returns only two values
        _,cnts, _ = cv2.findContours(row, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 1:
cate=c[count-1]
return cate
| 24.837838
| 86
| 0.525571
|
ff7b7df8e5bb3b3ecdf76dea4f84a97fa01dda5d
| 2,456
|
py
|
Python
|
sdk/python/pulumi_azure_native/kusto/v20190121/list_database_principals.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kusto/v20190121/list_database_principals.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/kusto/v20190121/list_database_principals.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListDatabasePrincipalsResult',
'AwaitableListDatabasePrincipalsResult',
'list_database_principals',
]
@pulumi.output_type
class ListDatabasePrincipalsResult:
"""
The list Kusto database principals operation response.
"""
def __init__(__self__, value=None):
if value and not isinstance(value, list):
raise TypeError("Expected argument 'value' to be a list")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def value(self) -> Optional[Sequence['outputs.DatabasePrincipalResponse']]:
"""
The list of Kusto database principals.
"""
return pulumi.get(self, "value")
class AwaitableListDatabasePrincipalsResult(ListDatabasePrincipalsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDatabasePrincipalsResult(
value=self.value)
def list_database_principals(cluster_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabasePrincipalsResult:
"""
The list Kusto database principals operation response.
:param str cluster_name: The name of the Kusto cluster.
:param str database_name: The name of the database in the Kusto cluster.
:param str resource_group_name: The name of the resource group containing the Kusto cluster.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:kusto/v20190121:listDatabasePrincipals', __args__, opts=opts, typ=ListDatabasePrincipalsResult).value
return AwaitableListDatabasePrincipalsResult(
value=__ret__.value)
| 35.085714
| 151
| 0.690961
|
c4a15e5278b7ce630de1eb6de4ae5e71f29a5ce5
| 2,745
|
py
|
Python
|
scrapy_dynamic_spiders/factories/crawl_spider_factory.py
|
harootune/scrapy_dynamic_spiders
|
a443533c17af6ba906e28fc897f7a6e4d19c2ed0
|
[
"MIT"
] | null | null | null |
scrapy_dynamic_spiders/factories/crawl_spider_factory.py
|
harootune/scrapy_dynamic_spiders
|
a443533c17af6ba906e28fc897f7a6e4d19c2ed0
|
[
"MIT"
] | null | null | null |
scrapy_dynamic_spiders/factories/crawl_spider_factory.py
|
harootune/scrapy_dynamic_spiders
|
a443533c17af6ba906e28fc897f7a6e4d19c2ed0
|
[
"MIT"
] | 1
|
2020-11-24T15:48:26.000Z
|
2020-11-24T15:48:26.000Z
|
# stdlib
import copy
from typing import List
# third party
from scrapy.spiders import Rule
# local
import scrapy_dynamic_spiders.utils.factory_utils as f_utils
from scrapy_dynamic_spiders.factories import SpiderClsFactory
class CrawlSpiderClsFactory(SpiderClsFactory):
"""Generates temporary CrawlSpider classes based on the factory's attributes."""
def __init__(self, custom_settings: dict = None, settings_ow: bool = False,
extractor_configs: List[dict] = None, rule_configs: List[dict] = None, rule_ow: bool = False):
# parent constructor #
super().__init__(custom_settings=custom_settings, settings_ow=settings_ow)
# attributes#
# public
self.extractor_configs = extractor_configs if extractor_configs else []
self.rule_configs = rule_configs if rule_configs else []
self.rule_ow = rule_ow
def _construct_rule_list(self, spidercls) -> List[Rule]:
"""
Constructs a list of rules for a new temporary CrawlSpider subclass, based on the factory's attributes and
the provided template spider class
:param spidercls: The CrawlSpider class or a CrawlSpider subclass
:return: a list of Rules
"""
# construct rules
if self.rule_ow:
rules = []
else:
rules = copy.deepcopy(spidercls.rules)
if not rules:
rules = []
for i in range(len(self.rule_configs)):
if not self.extractor_configs:
rules.append(f_utils.construct_rule({}, self.rule_configs[i]))
else:
# handles case where there are fewer extractor configs than rule configs
try:
rules.append(f_utils.construct_rule(self.extractor_configs[i], self.rule_configs[i]))
except IndexError:
rules.append(f_utils.construct_rule(self.extractor_configs[-1], self.rule_configs[i]))
return rules
def construct_spider(self, spidercls) -> type:
"""
        Generates a temporary spider class based on the provided template class and the factory's attributes
:param spidercls: The CrawlSpider class or a CrawlSpider subclass
:return: A Spider-derived class object
"""
if not spidercls:
raise AttributeError('Cannot construct a Spider without a template class.')
self._count += 1
settings = self._construct_custom_settings(spidercls)
rules = self._construct_rule_list(spidercls)
class_vars = {
'custom_settings': settings,
'rules': rules
}
return type(f'{spidercls.__name__}-{self._count}', (spidercls,), class_vars)
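# Illustrative usage sketch: the rule/extractor config keys are interpreted by
# factory_utils.construct_rule, and custom_settings merging is handled by the parent
# SpiderClsFactory, so only the default configuration is exercised here.
if __name__ == '__main__':
    from scrapy.spiders import CrawlSpider
    factory = CrawlSpiderClsFactory(custom_settings={'DOWNLOAD_DELAY': 1.0})
    TempSpider = factory.construct_spider(CrawlSpider)
    # each call yields a uniquely named subclass of the template spider
    print(TempSpider.__name__, issubclass(TempSpider, CrawlSpider))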
| 37.60274
| 114
| 0.650273
|
2214026ff78eb85af2bec4b7d01e9d03368ff233
| 5,818
|
py
|
Python
|
swagger_client/models/budget_notification.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/budget_notification.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
swagger_client/models/budget_notification.py
|
chbndrhnns/ahoi-client
|
8bd25f541c05af17c82904fa250272514b7971f2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.amount import Amount # noqa: F401,E501
from swagger_client.models.notification import Notification # noqa: F401,E501
class BudgetNotification(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_id': 'int',
'lower_threshold': 'Amount',
'upper_threshold': 'Amount'
}
attribute_map = {
'account_id': 'accountId',
'lower_threshold': 'lowerThreshold',
'upper_threshold': 'upperThreshold'
}
def __init__(self, account_id=None, lower_threshold=None, upper_threshold=None): # noqa: E501
"""BudgetNotification - a model defined in Swagger""" # noqa: E501
self._account_id = None
self._lower_threshold = None
self._upper_threshold = None
self.discriminator = None
self.account_id = account_id
if lower_threshold is not None:
self.lower_threshold = lower_threshold
if upper_threshold is not None:
self.upper_threshold = upper_threshold
@property
def account_id(self):
"""Gets the account_id of this BudgetNotification. # noqa: E501
Identifier of the account to which this notification belongs # noqa: E501
:return: The account_id of this BudgetNotification. # noqa: E501
:rtype: int
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this BudgetNotification.
Identifier of the account to which this notification belongs # noqa: E501
:param account_id: The account_id of this BudgetNotification. # noqa: E501
:type: int
"""
if account_id is None:
raise ValueError("Invalid value for `account_id`, must not be `None`") # noqa: E501
self._account_id = account_id
@property
def lower_threshold(self):
"""Gets the lower_threshold of this BudgetNotification. # noqa: E501
Optional limitation; lower threshold of the amount (negative values allowed) above which notifications will be sent # noqa: E501
:return: The lower_threshold of this BudgetNotification. # noqa: E501
:rtype: Amount
"""
return self._lower_threshold
@lower_threshold.setter
def lower_threshold(self, lower_threshold):
"""Sets the lower_threshold of this BudgetNotification.
Optional limitation; lower threshold of the amount (negative values allowed) above which notifications will be sent # noqa: E501
:param lower_threshold: The lower_threshold of this BudgetNotification. # noqa: E501
:type: Amount
"""
self._lower_threshold = lower_threshold
@property
def upper_threshold(self):
"""Gets the upper_threshold of this BudgetNotification. # noqa: E501
Optional limitation; upper threshold of the amount (negative values allowed) below which notifications will be sent # noqa: E501
:return: The upper_threshold of this BudgetNotification. # noqa: E501
:rtype: Amount
"""
return self._upper_threshold
@upper_threshold.setter
def upper_threshold(self, upper_threshold):
"""Sets the upper_threshold of this BudgetNotification.
Optional limitation; upper threshold of the amount (negative values allowed) below which notifications will be sent # noqa: E501
:param upper_threshold: The upper_threshold of this BudgetNotification. # noqa: E501
:type: Amount
"""
self._upper_threshold = upper_threshold
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BudgetNotification):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.245714
| 277
| 0.629598
|
39f7581810ffad7e25f15dc7d5d25470e8223da5
| 8,755
|
py
|
Python
|
sdks/python/apache_beam/typehints/native_type_compatibility_test.py
|
ibzib/beam
|
f98104a22b69972744a13378e17af5f2361fbb3e
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/typehints/native_type_compatibility_test.py
|
ibzib/beam
|
f98104a22b69972744a13378e17af5f2361fbb3e
|
[
"Apache-2.0"
] | 1
|
2020-09-03T06:16:36.000Z
|
2020-09-10T07:08:27.000Z
|
sdks/python/apache_beam/typehints/native_type_compatibility_test.py
|
ibzib/beam
|
f98104a22b69972744a13378e17af5f2361fbb3e
|
[
"Apache-2.0"
] | 1
|
2020-07-25T15:36:45.000Z
|
2020-07-25T15:36:45.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for Beam type compatibility library."""
# pytype: skip-file
from __future__ import absolute_import
import sys
import typing
import unittest
from apache_beam.typehints import typehints
from apache_beam.typehints.native_type_compatibility import convert_to_beam_type
from apache_beam.typehints.native_type_compatibility import convert_to_beam_types
from apache_beam.typehints.native_type_compatibility import convert_to_typing_type
from apache_beam.typehints.native_type_compatibility import convert_to_typing_types
from apache_beam.typehints.native_type_compatibility import is_any
_TestNamedTuple = typing.NamedTuple(
'_TestNamedTuple', [('age', int), ('name', bytes)])
_TestFlatAlias = typing.Tuple[bytes, float]
_TestNestedAlias = typing.List[_TestFlatAlias]
class _TestClass(object):
pass
class NativeTypeCompatibilityTest(unittest.TestCase):
def test_convert_to_beam_type(self):
test_cases = [
('raw bytes', bytes, bytes),
('raw int', int, int),
('raw float', float, float),
('any', typing.Any, typehints.Any),
('simple dict', typing.Dict[bytes, int],
typehints.Dict[bytes, int]),
('simple list', typing.List[int], typehints.List[int]),
('simple iterable', typing.Iterable[int], typehints.Iterable[int]),
('simple optional', typing.Optional[int], typehints.Optional[int]),
('simple set', typing.Set[float], typehints.Set[float]),
('simple unary tuple', typing.Tuple[bytes],
typehints.Tuple[bytes]),
('simple union', typing.Union[int, bytes, float],
typehints.Union[int, bytes, float]),
('namedtuple', _TestNamedTuple, _TestNamedTuple),
('test class', _TestClass, _TestClass),
('test class in list', typing.List[_TestClass],
typehints.List[_TestClass]),
('complex tuple', typing.Tuple[bytes, typing.List[typing.Tuple[
bytes, typing.Union[int, bytes, float]]]],
typehints.Tuple[bytes, typehints.List[typehints.Tuple[
bytes, typehints.Union[int, bytes, float]]]]),
# TODO(BEAM-7713): This case seems to fail on Py3.5.2 but not 3.5.4.
('arbitrary-length tuple', typing.Tuple[int, ...],
typehints.Tuple[int, ...])
if sys.version_info >= (3, 5, 4) else None,
('flat alias', _TestFlatAlias, typehints.Tuple[bytes, float]), # type: ignore[misc]
('nested alias', _TestNestedAlias,
typehints.List[typehints.Tuple[bytes, float]]),
('complex dict',
typing.Dict[bytes, typing.List[typing.Tuple[bytes, _TestClass]]],
typehints.Dict[bytes, typehints.List[typehints.Tuple[
bytes, _TestClass]]]),
('type var', typing.TypeVar('T'), typehints.TypeVariable('T')),
('nested type var',
typing.Tuple[typing.TypeVar('K'), typing.TypeVar('V')],
typehints.Tuple[typehints.TypeVariable('K'),
typehints.TypeVariable('V')]),
('iterator', typing.Iterator[typing.Any],
typehints.Iterator[typehints.Any]),
]
for test_case in test_cases:
if test_case is None:
continue
# Unlike typing types, Beam types are guaranteed to compare equal.
description = test_case[0]
typing_type = test_case[1]
expected_beam_type = test_case[2]
converted_beam_type = convert_to_beam_type(typing_type)
self.assertEqual(converted_beam_type, expected_beam_type, description)
converted_typing_type = convert_to_typing_type(converted_beam_type)
self.assertEqual(converted_typing_type, typing_type, description)
def test_generator_converted_to_iterator(self):
self.assertEqual(
typehints.Iterator[int],
convert_to_beam_type(typing.Generator[int, None, None]))
def test_newtype(self):
self.assertEqual(
typehints.Any, convert_to_beam_type(typing.NewType('Number', int)))
def test_pattern(self):
# TODO(BEAM-10254): Unsupported.
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern))
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern[str]))
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Pattern[bytes]))
def test_match(self):
# TODO(BEAM-10254): Unsupported.
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match))
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match[str]))
self.assertEqual(typehints.Any, convert_to_beam_type(typing.Match[bytes]))
def test_forward_reference(self):
self.assertEqual(typehints.Any, convert_to_beam_type('int'))
self.assertEqual(typehints.Any, convert_to_beam_type('typing.List[int]'))
self.assertEqual(
typehints.List[typehints.Any], convert_to_beam_type(typing.List['int']))
def test_convert_nested_to_beam_type(self):
self.assertEqual(typehints.List[typing.Any], typehints.List[typehints.Any])
self.assertEqual(
typehints.List[typing.Dict[int, str]],
typehints.List[typehints.Dict[int, str]])
def test_convert_bare_types(self):
# Conversions for unsubscripted types that have implicit subscripts.
test_cases = [
('bare list', typing.List, typehints.List[typehints.TypeVariable('T')]),
(
'bare dict',
typing.Dict,
typehints.Dict[typehints.TypeVariable('KT'),
typehints.TypeVariable('VT')]),
(
'bare tuple',
typing.Tuple,
typehints.Tuple[typehints.TypeVariable('T'), ...]),
('bare set', typing.Set, typehints.Set[typehints.TypeVariable('T')]),
(
'bare iterator',
typing.Iterator,
typehints.Iterator[typehints.TypeVariable('T_co')]),
(
'bare iterable',
typing.Iterable,
typehints.Iterable[typehints.TypeVariable('T_co')]),
(
'nested bare',
typing.Tuple[typing.Iterator],
typehints.Tuple[typehints.Iterator[typehints.TypeVariable('T_co')]]
),
]
if sys.version_info >= (3, 7):
test_cases += [
(
'bare generator',
typing.Generator,
typehints.Generator[typehints.TypeVariable('T_co')]),
]
for test_case in test_cases:
description = test_case[0]
typing_type = test_case[1]
expected_beam_type = test_case[2]
converted_beam_type = convert_to_beam_type(typing_type)
self.assertEqual(expected_beam_type, converted_beam_type, description)
def test_convert_bare_types_fail(self):
# These conversions should fail.
test_cases = [
('bare union', typing.Union),
]
if sys.version_info < (3, 7):
test_cases += [
('bare generator', typing.Generator),
]
for test_case in test_cases:
description = test_case[0]
typing_type = test_case[1]
with self.assertRaises(ValueError, msg=description):
convert_to_beam_type(typing_type)
def test_convert_to_beam_types(self):
typing_types = [
bytes,
typing.List[bytes],
typing.List[typing.Tuple[bytes, int]],
typing.Union[int, typing.List[int]]
]
beam_types = [
bytes,
typehints.List[bytes],
typehints.List[typehints.Tuple[bytes, int]],
typehints.Union[int, typehints.List[int]]
]
converted_beam_types = convert_to_beam_types(typing_types)
self.assertEqual(converted_beam_types, beam_types)
converted_typing_types = convert_to_typing_types(converted_beam_types)
self.assertEqual(converted_typing_types, typing_types)
def test_is_any(self):
test_cases = [
(True, typing.Any),
(False, typing.List[int]),
(False, typing.Union),
(False, 1),
(False, 'a'),
]
for expected, typ in test_cases:
self.assertEqual(expected, is_any(typ), msg='%s' % typ)
if __name__ == '__main__':
unittest.main()
| 39.084821
| 92
| 0.673901
|
a4a93aca43b590bcb1de4a09ea229931056d8ad8
| 987
|
py
|
Python
|
Python_Assistant/PyAssist-BasicFiles/WxPython.py
|
GeekyShiva/PyAssist
|
a8761cfcd8344771e7e1bfab469ed3e49f12adda
|
[
"MIT"
] | null | null | null |
Python_Assistant/PyAssist-BasicFiles/WxPython.py
|
GeekyShiva/PyAssist
|
a8761cfcd8344771e7e1bfab469ed3e49f12adda
|
[
"MIT"
] | null | null | null |
Python_Assistant/PyAssist-BasicFiles/WxPython.py
|
GeekyShiva/PyAssist
|
a8761cfcd8344771e7e1bfab469ed3e49f12adda
|
[
"MIT"
] | 1
|
2020-08-17T15:01:43.000Z
|
2020-08-17T15:01:43.000Z
|
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None,
pos=wx.DefaultPosition, size=wx.Size(450, 100),
style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
wx.CLOSE_BOX | wx.CLIP_CHILDREN,
title="PyAssist")
panel = wx.Panel(self)
my_sizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(panel,
label="Hello I am PyAssist the Python Digital Assistant. How may I help you?")
my_sizer.Add(lbl, 0, wx.ALL, 5)
self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,size=(400,30))
self.txt.SetFocus()
self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
my_sizer.Add(self.txt, 0, wx.ALL, 5)
panel.SetSizer(my_sizer)
self.Show()
def OnEnter(self, event):
input = self.txt.GetValue()
        input = input.lower()
        # handling of the entered command would go here (left unimplemented)
if __name__ == "__main__":
app = wx.App(True)
frame = MyFrame()
app.MainLoop()
| 31.83871
| 86
| 0.601824
|
f0b1e423146e680526351e0f11d27cbd6867ff8e
| 4,130
|
py
|
Python
|
src/ColorTheories/classes/Color.py
|
pgscasado/Color-Theories
|
58119879cbc6161720ac4f16ae9949fbdbbcf063
|
[
"MIT"
] | null | null | null |
src/ColorTheories/classes/Color.py
|
pgscasado/Color-Theories
|
58119879cbc6161720ac4f16ae9949fbdbbcf063
|
[
"MIT"
] | 1
|
2021-05-04T18:38:22.000Z
|
2021-05-04T18:38:22.000Z
|
src/ColorTheories/classes/Color.py
|
pgscasado/Color-Theories
|
58119879cbc6161720ac4f16ae9949fbdbbcf063
|
[
"MIT"
] | 1
|
2021-04-27T23:32:21.000Z
|
2021-04-27T23:32:21.000Z
|
import copy
import math
import operator
# Implementation of Color:
# - example of creating a color independently of any other:
# Color("name", red=213, green="123", blue="0", alpha="50")
# - example of creating a color as the result of an additive process between two colors:
# red = Color("red", 255, 0, 0, 255)
# green = Color("green", 0, 255, 0, 255)
# yellow = red + green + "yellow"
# + note that when one color is added to another, the result is another color,
# + and that when a color is added to a string, the string sets the name of the color.
# - example of creating a color as the result of a subtractive process between two colors:
# green = Color("green", 0, 255, 0, 255)
# blue = Color("blue", 0, 0, 255, 255)
# cyan = green + blue + "cyan"
# blue == cyan - green - "blue" -> True
class Color:
color_names = set()
def __hash__(self):
return hash((self.red, self.green, self.blue))
def __init__(self, name, red, green, blue, alpha, difficulty):
self.name = name
self.red = min(red, 255)
self.green = min(green, 255)
self.blue = min(blue, 255)
self.alpha = min(alpha, 255)
self.difficulty = difficulty
Color.color_names.add(self)
@classmethod
def fromNewColor(cls, obj, new_name, new_diff):
obj.name = new_name
obj.difficulty = new_diff
if(obj in Color.color_names):
Color.color_names.remove(obj)
Color.color_names.add(obj)
return cls(obj.name,*tuple(obj), obj.difficulty)
def __iter__(self):
yield self.red
yield self.green
yield self.blue
yield self.alpha
def __eq__(self, o: object) -> bool:
#sanitization
if not callable(getattr(o, "__iter__", None)): return False
iterable = iter(o)
red = next(iterable)
green = next(iterable)
blue = next(iterable)
alpha = next(iterable)
return (-1 <= self.red - red <= 1) and (-1 <= self.green - green <= 1) and (-1 <= self.blue - blue <= 1) and (self.alpha == alpha)
def __add__(self, other: object):
_tmp = copy.deepcopy(self)
if type(other) is not Color:
return _tmp
else:
_tmp.red = min(_tmp.red + other.red, 255)
_tmp.green = min(_tmp.green + other.green, 255)
_tmp.blue = min(_tmp.blue + other.blue, 255)
_tmp.alpha = min(math.floor(((_tmp.alpha/255)+((other.alpha/255)*_tmp.alpha/255))*255), 255)
_tmp = self._update_static_values(_tmp)
return _tmp
def __sub__(self, other):
_tmp = copy.deepcopy(self)
if type(other) is not Color:
return _tmp
else:
_tmp.red = max(_tmp.red - other.red,0)
_tmp.green = max(_tmp.green - other.green,0)
_tmp.blue = max(_tmp.blue - other.blue,0)
_tmp.alpha = min(math.floor(((_tmp.alpha/255)+((other.alpha/255)*_tmp.alpha/255))*255), 255)
_tmp = self._update_static_values(_tmp)
return _tmp
def __mul__(self, other):
_tmp = copy.deepcopy(self)
if type(other) is not Color:
return _tmp
else:
_tmp.name = "unknown"
_tmp.red = min(other.red, _tmp.red)
_tmp.green = min(other.green, _tmp.green)
_tmp.blue = min(other.blue, _tmp.blue)
_tmp.alpha = min(math.floor(((_tmp.alpha/255)+((other.alpha/255)*_tmp.alpha/255))*255), 255)
_tmp = self._update_static_values(_tmp)
return _tmp
def __truediv__(self, other):
_tmp = copy.deepcopy(self)
if type(other) is not Color:
return _tmp
else:
_tmp.red = abs(_tmp.red - other.red)
_tmp.green = abs(_tmp.green - other.green)
_tmp.blue = abs(_tmp.blue - other.blue)
_tmp.alpha = min(math.floor(((_tmp.alpha/255)+((other.alpha/255)*_tmp.alpha/255))*255), 255)
_tmp = self._update_static_values(_tmp)
return _tmp
@classmethod
def sorted(cls, colors: list):
bwg = [color for color in colors if color.name in ('black', 'white', 'gray')]
colors = [color for color in colors if color not in bwg]
bwg = sorted(bwg, key=lambda c: c.red+c.green+c.blue, reverse=True)
colors = sorted(colors, key=lambda c: c.red+c.green+c.blue)
return bwg + colors
def _update_static_values(self, _tmp):
known_color = [color for color in Color.color_names if _tmp == color]
_tmp.name = known_color[0].name if known_color else 'unknown'
_tmp.difficulty = known_color[0].difficulty if known_color else _tmp.difficulty
return _tmp
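# Illustrative usage sketch, using the full constructor signature (which, unlike the
# shorter calls in the comments above, also takes a difficulty value):
if __name__ == '__main__':
    red = Color("red", 255, 0, 0, 255, difficulty=1)
    green = Color("green", 0, 255, 0, 255, difficulty=1)
    yellow = Color.fromNewColor(red + green, "yellow", 1)
    print(tuple(yellow))  # -> (255, 255, 0, 255)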
| 33.306452
| 132
| 0.680387
|
35e847134f56b1cc2104b2e46a2c0853d8504f28
| 8,513
|
py
|
Python
|
tests/test_kb_construction.py
|
chaithyagr/torchkbnufft
|
3592175fe2d1f611fb2cfec4d4150a850c92605f
|
[
"MIT"
] | null | null | null |
tests/test_kb_construction.py
|
chaithyagr/torchkbnufft
|
3592175fe2d1f611fb2cfec4d4150a850c92605f
|
[
"MIT"
] | null | null | null |
tests/test_kb_construction.py
|
chaithyagr/torchkbnufft
|
3592175fe2d1f611fb2cfec4d4150a850c92605f
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from torchkbnufft import (
AdjKbNufft,
AdjMriSenseNufft,
KbInterpBack,
KbInterpForw,
KbNufft,
MriSenseNufft,
)
def test_kb_matching(testing_tol):
norm_tol = testing_tol
def check_tables(table1, table2):
for ind, table in enumerate(table1):
assert np.linalg.norm(table - table2[ind]) < norm_tol
im_szs = [(256, 256), (10, 256, 256)]
kbwidths = [2.34, 5]
orders = [0, 2]
for kbwidth in kbwidths:
for order in orders:
for im_sz in im_szs:
smap = torch.randn(*((1,) + im_sz))
base_table = AdjKbNufft(im_sz, order=order, kbwidth=kbwidth).table
cur_table = KbNufft(im_sz, order=order, kbwidth=kbwidth).table
check_tables(base_table, cur_table)
cur_table = KbInterpBack(im_sz, order=order, kbwidth=kbwidth).table
check_tables(base_table, cur_table)
cur_table = KbInterpForw(im_sz, order=order, kbwidth=kbwidth).table
check_tables(base_table, cur_table)
cur_table = MriSenseNufft(
smap, im_sz, order=order, kbwidth=kbwidth
).table
check_tables(base_table, cur_table)
cur_table = AdjMriSenseNufft(
smap, im_sz, order=order, kbwidth=kbwidth
).table
check_tables(base_table, cur_table)
def test_2d_init_inputs():
# all object initializations have assertions
# this should result in an error if any dimensions don't match
# test 2d scalar inputs
im_sz = (256, 256)
smap = torch.randn(*((1,) + im_sz))
grid_sz = (512, 512)
n_shift = (128, 128)
numpoints = 6
table_oversamp = 2 ** 10
kbwidth = 2.34
order = 0
norm = "None"
ob = KbInterpForw(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbInterpBack(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjKbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = MriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjMriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
# test 2d tuple inputs
im_sz = (256, 256)
smap = torch.randn(*((1,) + im_sz))
grid_sz = (512, 512)
n_shift = (128, 128)
numpoints = (6, 6)
table_oversamp = (2 ** 10, 2 ** 10)
kbwidth = (2.34, 2.34)
order = (0, 0)
norm = "None"
ob = KbInterpForw(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbInterpBack(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjKbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = MriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjMriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
def test_3d_init_inputs():
# all object initializations have assertions
# this should result in an error if any dimensions don't match
# test 3d scalar inputs
im_sz = (10, 256, 256)
smap = torch.randn(*((1,) + im_sz))
grid_sz = (10, 512, 512)
n_shift = (5, 128, 128)
numpoints = 6
table_oversamp = 2 ** 10
kbwidth = 2.34
order = 0
norm = "None"
ob = KbInterpForw(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbInterpBack(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjKbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = MriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjMriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
# test 3d tuple inputs
im_sz = (10, 256, 256)
smap = torch.randn(*((1,) + im_sz))
grid_sz = (10, 512, 512)
n_shift = (5, 128, 128)
numpoints = (6, 6, 6)
table_oversamp = (2 ** 10, 2 ** 10, 2 ** 10)
kbwidth = (2.34, 2.34, 2.34)
order = (0, 0, 0)
norm = "None"
ob = KbInterpForw(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbInterpBack(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
)
ob = KbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjKbNufft(
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = MriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
ob = AdjMriSenseNufft(
smap=smap,
im_size=im_sz,
grid_size=grid_sz,
n_shift=n_shift,
numpoints=numpoints,
table_oversamp=table_oversamp,
kbwidth=kbwidth,
order=order,
norm=norm,
)
| 23.845938
| 83
| 0.567133
|
ea649dba52d17f38a322683bd39d41f74856fdab
| 2,255
|
py
|
Python
|
passacre/multibase.py
|
massich/passacre_mirror
|
f2e87f334c56ab9680ab444b1be6e2dd879313d2
|
[
"CC0-1.0"
] | 47
|
2015-03-06T08:49:50.000Z
|
2022-01-09T09:10:03.000Z
|
passacre/multibase.py
|
massich/passacre_mirror
|
f2e87f334c56ab9680ab444b1be6e2dd879313d2
|
[
"CC0-1.0"
] | 5
|
2015-04-21T21:35:44.000Z
|
2019-09-30T18:43:03.000Z
|
passacre/multibase.py
|
massich/passacre_mirror
|
f2e87f334c56ab9680ab444b1be6e2dd879313d2
|
[
"CC0-1.0"
] | 10
|
2015-04-03T21:18:46.000Z
|
2020-02-11T17:15:51.000Z
|
# Copyright (c) Aaron Gallagher <_@habnab.it>
# See COPYING for details.
from __future__ import unicode_literals
class MultiBase(object):
"""Represents a base where not every digit has the same possible values.
The ``bases`` parameter must be a sequence of strings, where each item in
the sequence represents one digit, in order from most significant digit to
least significant digit. Each character in each string represents one
possible value for the corresponding digit, in order from the lowest to
highest value for that digit.
The ``max_encodable_value`` attribute is the largest integer that can be
encoded with this base.
"""
def __init__(self, bases):
self.bases = bases
def encode(self, n):
"""Encode an integer to a string, using this base.
The ``n`` parameter must be an integer. Returns the encoded string, or
raises ``ValueError`` if ``n`` is greater than the largest encodable
integer.
"""
if n > self.max_encodable_value:
raise ValueError(
'%d is greater than the largest encodable integer (%d)' % (
n, self.max_encodable_value))
ret = []
for base in reversed(self.bases):
n, d = divmod(n, len(base))
ret.append(base[d])
ret.reverse()
return ''.join(ret)
def decode(self, x):
"""Decode a string to an integer, using this base.
The ``x`` parameter must be a string as long as the ``bases`` sequence.
Returns the decoded integer or raises ``ValueError`` if the length of
``x`` is not equal to the length of ``bases`` or any of the characters
in ``x`` aren't valid digits for their position.
"""
if len(x) != len(self.bases):
raise ValueError(
"the length of %r (%d) doesn't match the number of bases (%d)" % (
x, len(x), len(self.bases)))
ret = 0
for base, d in zip(self.bases, x):
ret = (ret * len(base)) + base.index(d)
return ret
@property
def max_encodable_value(self):
ret = 1
for base in self.bases:
ret *= len(base)
return ret - 1
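# Illustrative usage sketch: a two-digit base whose first digit is one of 'ab' and
# whose second digit is one of '0123'.
if __name__ == '__main__':
    mb = MultiBase(['ab', '0123'])
    assert mb.max_encodable_value == 7  # 2 * 4 - 1
    assert mb.encode(5) == 'b1'  # 5 == 1 * 4 + 1
    assert mb.decode('b1') == 5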
| 34.692308
| 82
| 0.600443
|
394bc942567928d25e5425c694865527bcd45544
| 668
|
py
|
Python
|
test.py
|
HugoSilvaSantos/creator-python-client
|
477c543ac239c68379e0f6e1e8c97c72572afddb
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
HugoSilvaSantos/creator-python-client
|
477c543ac239c68379e0f6e1e8c97c72572afddb
|
[
"BSD-3-Clause"
] | null | null | null |
test.py
|
HugoSilvaSantos/creator-python-client
|
477c543ac239c68379e0f6e1e8c97c72572afddb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Test routine for Creator Python Client .
"""
import unittest
import os
import creator_python_client
CREATOR_ACCESS_KEY = os.environ['CREATOR_ACCESS_KEY']
CREATOR_ACCESS_SECRET = os.environ['CREATOR_ACCESS_SECRET']
class CreatorTest(unittest.TestCase):
"""
Test Class.
"""
def test_ds_connection(self):
"""
Test connection against the device server.
"""
self.assertEqual(type(creator_python_client.request(
CREATOR_ACCESS_KEY, CREATOR_ACCESS_SECRET,
method="get", steps=["versions"])), dict)
if __name__ == '__main__':
unittest.main()
| 22.266667
| 60
| 0.672156
|
a025e7226c9f3c3b55853821262fb1859aa06400
| 5,105
|
py
|
Python
|
experiments/genuary2022/g22_07_sol_lewitt_wall.py
|
brendanhowell/cursor
|
81ac2e1e80f0a3ca56208b9498026fca147e7e0b
|
[
"MIT"
] | 3
|
2021-12-02T08:30:02.000Z
|
2022-03-06T18:25:15.000Z
|
experiments/genuary2022/g22_07_sol_lewitt_wall.py
|
brendanhowell/cursor
|
81ac2e1e80f0a3ca56208b9498026fca147e7e0b
|
[
"MIT"
] | 41
|
2020-03-22T13:15:04.000Z
|
2022-03-17T11:29:47.000Z
|
experiments/genuary2022/g22_07_sol_lewitt_wall.py
|
brendanhowell/cursor
|
81ac2e1e80f0a3ca56208b9498026fca147e7e0b
|
[
"MIT"
] | 2
|
2020-01-09T16:35:14.000Z
|
2022-02-28T15:21:08.000Z
|
from cursor import path
from shapely.geometry import MultiLineString
from shapely.affinity import rotate
from shapely import speedups
from math import sqrt
def hatchbox(rect, angle, spacing):
"""
returns a Shapely geometry (MULTILINESTRING, or more rarely,
GEOMETRYCOLLECTION) for a simple hatched rectangle.
args:
rect - a Shapely geometry for the outer boundary of the hatch
Likely most useful if it really is a rectangle
angle - angle of hatch lines, conventional anticlockwise -ve
spacing - spacing between hatch lines
GEOMETRYCOLLECTION case occurs when a hatch line intersects with
the corner of the clipping rectangle, which produces a point
along with the usual lines.
"""
(llx, lly, urx, ury) = rect.bounds
centre_x = (urx + llx) / 2
centre_y = (ury + lly) / 2
diagonal_length = sqrt((urx - llx) ** 2 + (ury - lly) ** 2)
number_of_lines = 2 + int(diagonal_length / spacing)
hatch_length = spacing * (number_of_lines - 1)
coords = []
for i in range(number_of_lines):
if i % 2:
coords.extend(
[
(
(
centre_x - hatch_length / 2,
centre_y - hatch_length / 2 + i * spacing,
),
(
centre_x + hatch_length / 2,
centre_y - hatch_length / 2 + i * spacing,
),
)
]
)
else:
coords.extend(
[
(
(
centre_x + hatch_length / 2,
centre_y - hatch_length / 2 + i * spacing,
),
(
centre_x - hatch_length / 2,
centre_y - hatch_length / 2 + i * spacing,
),
)
]
)
lines = MultiLineString(coords)
lines = rotate(lines, angle, origin="centroid", use_radians=False)
return rect.intersection(lines)
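# Illustrative sketch: hatch a plain 100x50 rectangle with 45-degree lines spaced
# 10 units apart (shapely.geometry.box is imported here only for this example).
def _hatchbox_example():
    from shapely.geometry import box
    rect = box(0, 0, 100, 50)
    # normally a MULTILINESTRING clipped to the rectangle; see the docstring above
    return hatchbox(rect, 45, 10)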
#######################################################
# the two primitives that actually draw stuff
def plot_point(pt, pen):
print("SP%d;PA%d,%d;PD;PU;" % (pen, int(pt.x), int(pt.y)))
def plot_linestring(line, pen):
first = 1
pts = []
for (x, y) in line.coords:
if first == 1:
first = 0
print("SP%d;PA%d,%d;PD;" % (pen, int(x), int(y)))
pts.extend((int(x), int(y)))
print("PA", ",".join(str(p) for p in pts), ";PU;")
#######################################################
# a polygon is just lines
def plot_polygon(poly, pen):
plot_linestring(poly.exterior, pen)
for i in poly.interiors:
plot_linestring(i, pen)
#######################################################
# the multi* functions: just call each type multiple times
def plot_multipoint(multipt, pen):
for i in multipt.geoms:
plot_point(i, pen)
def plot_multilinestring(multi, pen):
for i in multi.geoms:
plot_linestring(i, pen)
def plot_multipolygon(multipoly, pen):
for i in multipoly.geoms:
plot_polygon(i, pen)
#######################################################
# this one gets a bit hairy with recursion
def plot_geomcollection(geomcollection, pen):
for i in geomcollection.geoms:
plot(i, pen)
#######################################################
# type-aware plotting function
# you'll probably call this most of all
def plot(obj, pen):
gtype = obj.geom_type
if gtype == "Point":
plot_point(obj, pen)
elif gtype == "LineString":
plot_linestring(obj, pen)
elif gtype == "LinearRing":
# same as a linestring, but closed
plot_linestring(obj, pen)
elif gtype == "Polygon":
plot_polygon(obj, pen)
elif gtype == "Multipoint":
plot_multipoint(obj, pen)
elif gtype == "MultiLineString":
plot_multilinestring(obj, pen)
elif gtype == "MultiPolygon":
plot_multipolygon(obj, pen)
elif gtype == "GeomCollection":
plot_geomcollection(obj, pen)
else:
print("*** Un-handled geometry:", gtype, ":", obj)
exit(1)
#######################################################
# setup/cleanup
def init():
# enable Shapely speedups, if possible
if speedups.available:
speedups.enable()
print("IN;")
def trailer():
print("PU;SP;")
if __name__ == "__main__":
# recordings = data.DataDirHandler().recordings()
# _loader = loader.Loader(directory=recordings, limit_files=1)
# pc = _loader.all_paths()
    pc_final = path.PathCollection()
    # note: the paths built in the loops below are constructed but never added to pc_final
for line in range(30):
p = path.Path()
p.add(0, 0)
p.add(line, 30)
for line in range(30):
p = path.Path()
p.add(30, 0)
p.add(30, line)
for line in range(30):
p = path.Path()
p.add(0, 0)
p.add(line, 30)
| 26.868421
| 70
| 0.511851
|
47cd760468122c581fc2f8c46b704397314e6075
| 7,859
|
py
|
Python
|
pyblock/pd_utils.py
|
robertodr/pyblock
|
cf97502df45685575cae251a1c7781e9786d486c
|
[
"BSD-3-Clause"
] | 21
|
2015-07-07T15:10:30.000Z
|
2021-12-13T14:25:20.000Z
|
pyblock/pd_utils.py
|
robertodr/pyblock
|
cf97502df45685575cae251a1c7781e9786d486c
|
[
"BSD-3-Clause"
] | 10
|
2017-11-01T01:37:19.000Z
|
2022-01-18T08:38:27.000Z
|
pyblock/pd_utils.py
|
robertodr/pyblock
|
cf97502df45685575cae251a1c7781e9786d486c
|
[
"BSD-3-Clause"
] | 9
|
2015-10-16T14:47:35.000Z
|
2022-02-10T11:33:22.000Z
|
'''Pandas-based wrapper around :mod:`pyblock.blocking`.'''
# copyright: (c) 2014 James Spencer
# license: modified BSD license; see LICENSE for further details.
import numpy
import pandas as pd
import pyblock.blocking
def reblock(data, axis=0, weights=None):
'''Blocking analysis of correlated data.
Parameters
----------
data : :class:`pandas.Series` or :class:`pandas.DataFrame`
Data to be blocked. See ``axis`` for order.
axis : int
If non-zero, variables in data are in rows with the columns
corresponding to the observation values. Blocking is then performed along
the rows. Otherwise each column is a variable, the observations are in the
columns and blocking is performed down the columns. Only used if data is
a :class:`pandas.DataFrame`.
weights : :class:`pandas.Series` or :class:`pandas.DataFrame`
A 1D weighting of the data to be reblocked. For multidimensional data an
identical weighting is applied to the data for each variable.
Returns
-------
data_len : :class:`pandas.Series`
Number of data points used in each reblocking iteration. Note some
reblocking iterations discard a data point if there were an odd number of
data points in the previous iteration.
block_info : :class:`pandas.DataFrame`
Mean, standard error and estimated standard error for each variable at each
reblock step.
covariance : :class:`pandas.DataFrame`
Covariance matrix at each reblock step.
See also
--------
:func:`pyblock.blocking.reblock`:
numpy-based implementation; see for documentation and notes on the
reblocking procedure. :func:`pyblock.pd_utils.reblock` is a simple wrapper
around this.
'''
try:
columns = [data.name]
if data.name is None:
columns = ['data']
axis = 0
except AttributeError:
# Have DataFrame rather than Series.
if axis:
columns = data.index.values
else:
columns = data.columns.values
if weights is not None:
if isinstance(weights, pd.DataFrame):
if numpy.min(weights.shape) > 1:
raise RuntimeError("cannot handle multidimensional weights")
weights = numpy.array(weights.unstack())
else:
weights = weights.values
block_stats = pyblock.blocking.reblock(data.values,
rowvar=axis,
weights=weights)
data_size = data.shape[axis]
optimal_blocks = pyblock.blocking.find_optimal_block(data_size, block_stats)
# Now nicely package it up into a dict of pandas/built-in objects.
iblock = []
data_len = []
block_info = []
covariance = []
keys = ['mean', 'standard error', 'standard error error', 'optimal block']
multi_keys = [(col,k) for col in columns for k in keys]
multi_keys = pd.MultiIndex.from_tuples(multi_keys)
null = numpy.zeros_like(block_stats[0].mean)
for stat in block_stats:
# Contents of stat:
# (iblock, data_len, mean, covariance, standard err,
        #     estimate of error in standard error)
iblock.append(stat.block)
data_len.append(stat.ndata)
pd_stat = [stat.mean, stat.std_err, stat.std_err_err, null]
pd_stat = numpy.array(pd_stat).T.flatten()
block_info.append(pd.Series(pd_stat, index=multi_keys))
# Covariance is a 2D matrix (in general) so can't put it into
# a DataFrame with everything else, so put it in its own.
cov = numpy.array(stat.cov, ndmin=2)
covariance.append(pd.DataFrame(cov, index=columns, columns=columns))
data_len = pd.Series(data_len, index=iblock, name='data length')
data_len.index.name = 'reblock'
block_info = pd.concat(block_info, axis=1, keys=iblock).transpose()
block_info.index.name = 'reblock'
loc = block_info.columns.get_level_values(1) == 'optimal block'
block_info.loc[:,loc] = ''
covariance = pd.concat(covariance, keys=iblock)
covariance.index.names = ['reblock', '']
for (ivar, optimal) in enumerate(optimal_blocks):
if optimal >= 0:
block_info.loc[optimal,(columns[ivar], 'optimal block')] = '<--- '
return (data_len, block_info, covariance)
def optimal_block(block_sub_info):
'''Get the optimal block value from the reblocking data.
Parameters
----------
block_sub_info: :class:`pandas.DataFrame` or :class:`pandas.Series`
        Reblocking data (i.e. the second item of the tuple returned by ``reblock``),
or a subset thereof containing the statistics columns for one or more data
items.
Returns
-------
index : int
Reblocking index corresponding to the reblocking iteration at which serial
correlation has been removed (as estimated by the procedure in
``pyblock.blocking.find_optimal_block``). If multiple data sets are passed
in block_sub_info, this is the maximum index out of all data sets. Set to
inf if an optimal block is not found for a data set.
Raises
------
ValueError
block_sub_info contains no Series or column in DataFrame named 'optimal
block'.
'''
# Handle the following cases:
# * Series with optimal block in it.
# * block_sub_info DataFrame for one variable (no hierarchical column names)
# * block_sub_info DataFrame for multiple variables (hierarchical column names)
# (each set of columns for one variable in block_sub_info contains the mean,
# standard error and estimated error in the standard error for that
# variable).
try:
if 'optimal block' in block_sub_info.name:
iterator = [('optimal block', block_sub_info)]
else:
raise ValueError('No optimal block data')
except AttributeError:
# Have DataFrame.
# 'optimal block' is in the innermost level.
level = block_sub_info.columns.nlevels - 1
opt_cols = [col == 'optimal block'
for col in block_sub_info.columns.get_level_values(level)]
if not any(opt_cols):
raise ValueError('No optimal block data')
iterator = block_sub_info.loc[:,opt_cols].iteritems()
opt = -1
for (name, col) in iterator:
col_opt = col[col != ''].index
if len(col_opt) == 0:
opt = float('inf')
elif len(col_opt) == 1:
opt = max(col_opt[0], opt)
else:
raise ValueError('Multiple entries listed as optimal.')
return opt
def reblock_summary(block_sub_info):
'''Get the data corresponding to the optimal block from the reblocking data.
Parameters
----------
block_sub_info : :class:`pandas.DataFrame` or :class:`pandas.Series`
        Reblocking data (i.e. the second item of the tuple returned by ``reblock``),
or a subset thereof containing the statistics columns for one or more data
items.
Returns
-------
summary : :class:`pandas.DataFrame`
Mean, standard error and estimate of the error in the standard error
corresponding to the optimal block size in the reblocking data (or largest
        optimal size if multiple data sets are given). The index is labelled with
the data name, if known. An empty DataFrame is returned if no optimal block
size was found.
'''
opt = optimal_block(block_sub_info)
if opt < float('inf'):
summary = block_sub_info.loc[opt]
# Convert to DataFrame, with statistics in columns.
if summary.index.nlevels == 1:
# Sadly don't know the data name; leave to user.
summary = pd.DataFrame(summary).T
else:
# Have hierarchical index; can pivot into a DataFrame.
# Each row will be labelled by the data name.
summary = summary.unstack()
summary.drop('optimal block', axis=1, inplace=True)
else:
summary = pd.DataFrame()
return summary
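# Illustrative sketch of the typical workflow on serially correlated toy data
# (the smoothing window, series length and name here are arbitrary choices):
if __name__ == '__main__':
    rng = numpy.random.RandomState(seed=0)
    noise = rng.randn(2**12)
    series = pd.Series(numpy.convolve(noise, numpy.ones(16) / 16, mode='same'), name='energy')
    (data_length, block_info, covariance) = reblock(series)
    print(reblock_summary(block_info))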
| 37.42381
| 83
| 0.663189
|
7e3f06dae8fd10587dc2080530d66aa19ea3311d
| 3,861
|
py
|
Python
|
python/tests/test_graph_functions.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | null | null | null |
python/tests/test_graph_functions.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | 39
|
2021-06-08T04:19:00.000Z
|
2022-03-19T17:58:10.000Z
|
python/tests/test_graph_functions.py
|
mhendricks96/data-structures-and-algorithms
|
9c07d284fa8f54a0405a1fc5bda963b6150cc2ef
|
[
"MIT"
] | null | null | null |
from graphs.graphs import Graph, Edge, Vertex
from code_challenges.graph_functions.graph_functions import business_trip
import pytest
def test_business_trip():
my_graph = Graph()
pandora = my_graph.add_node('Pandora')
metroville = my_graph.add_node('Metroville')
narnia = my_graph.add_node('Narnia')
naboo = my_graph.add_node('Naboo')
arendelle = my_graph.add_node('Arendelle')
monstropolis = my_graph.add_node('Monstropolis')
my_graph.add_edge(pandora, arendelle, 150)
my_graph.add_edge(arendelle, pandora, 150)
my_graph.add_edge(pandora, metroville, 82)
my_graph.add_edge(metroville, pandora, 82)
my_graph.add_edge(metroville, arendelle, 99)
my_graph.add_edge(arendelle, metroville, 99)
my_graph.add_edge(metroville, narnia, 37)
my_graph.add_edge(narnia, metroville, 37)
my_graph.add_edge(metroville, monstropolis, 105)
my_graph.add_edge(monstropolis, metroville,105)
my_graph.add_edge(metroville, naboo, 26)
my_graph.add_edge(naboo, metroville, 26)
my_graph.add_edge(monstropolis, naboo, 73)
my_graph.add_edge(naboo, monstropolis, 73)
my_graph.add_edge(narnia, naboo, 250)
my_graph.add_edge(naboo, narnia, 250)
my_graph.add_edge(arendelle, monstropolis, 42)
my_graph.add_edge(monstropolis,arendelle, 42)
actual = business_trip(my_graph, [pandora, arendelle])
expected = True, 150
assert actual == expected
def test_business_trip_2():
my_graph = Graph()
pandora = my_graph.add_node('Pandora')
metroville = my_graph.add_node('Metroville')
narnia = my_graph.add_node('Narnia')
naboo = my_graph.add_node('Naboo')
arendelle = my_graph.add_node('Arendelle')
monstropolis = my_graph.add_node('Monstropolis')
my_graph.add_edge(pandora, arendelle, 150)
my_graph.add_edge(arendelle, pandora, 150)
my_graph.add_edge(pandora, metroville, 82)
my_graph.add_edge(metroville, pandora, 82)
my_graph.add_edge(metroville, arendelle, 99)
my_graph.add_edge(arendelle, metroville, 99)
my_graph.add_edge(metroville, narnia, 37)
my_graph.add_edge(narnia, metroville, 37)
my_graph.add_edge(metroville, monstropolis, 105)
my_graph.add_edge(monstropolis, metroville,105)
my_graph.add_edge(metroville, naboo, 26)
my_graph.add_edge(naboo, metroville, 26)
my_graph.add_edge(monstropolis, naboo, 73)
my_graph.add_edge(naboo, monstropolis, 73)
my_graph.add_edge(narnia, naboo, 250)
my_graph.add_edge(naboo, narnia, 250)
my_graph.add_edge(arendelle, monstropolis, 42)
my_graph.add_edge(monstropolis,arendelle, 42)
actual = business_trip(my_graph, [pandora, arendelle, metroville])
expected = True, 249
assert actual == expected
def test_business_trip_sad():
my_graph = Graph()
pandora = my_graph.add_node('Pandora')
metroville = my_graph.add_node('Metroville')
narnia = my_graph.add_node('Narnia')
naboo = my_graph.add_node('Naboo')
arendelle = my_graph.add_node('Arendelle')
monstropolis = my_graph.add_node('Monstropolis')
my_graph.add_edge(pandora, arendelle, 150)
my_graph.add_edge(arendelle, pandora, 150)
my_graph.add_edge(pandora, metroville, 82)
my_graph.add_edge(metroville, pandora, 82)
my_graph.add_edge(metroville, arendelle, 99)
my_graph.add_edge(arendelle, metroville, 99)
my_graph.add_edge(metroville, narnia, 37)
my_graph.add_edge(narnia, metroville, 37)
my_graph.add_edge(metroville, monstropolis, 105)
my_graph.add_edge(monstropolis, metroville,105)
my_graph.add_edge(metroville, naboo, 26)
my_graph.add_edge(naboo, metroville, 26)
my_graph.add_edge(monstropolis, naboo, 73)
my_graph.add_edge(naboo, monstropolis, 73)
my_graph.add_edge(narnia, naboo, 250)
my_graph.add_edge(naboo, narnia, 250)
my_graph.add_edge(arendelle, monstropolis, 42)
my_graph.add_edge(monstropolis,arendelle, 42)
actual = business_trip(my_graph, [pandora, naboo])
expected = False, 0
assert actual == expected
| 36.424528
| 73
| 0.766641
|
e13a76a319fa7860153724445e5793370b72fce3
| 1,609
|
py
|
Python
|
info.py
|
CPFelix/pytorch-image-models-
|
d0c322b2a55d156b0fe5d9030d9599708a349266
|
[
"Apache-2.0"
] | null | null | null |
info.py
|
CPFelix/pytorch-image-models-
|
d0c322b2a55d156b0fe5d9030d9599708a349266
|
[
"Apache-2.0"
] | null | null | null |
info.py
|
CPFelix/pytorch-image-models-
|
d0c322b2a55d156b0fe5d9030d9599708a349266
|
[
"Apache-2.0"
] | null | null | null |
import timm
from pprint import pprint
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
import torch
# v0.1-rsb-weights
# TEST 8
if __name__ == "__main__":
# print models
model_names = timm.list_models(pretrained=True)
# model_names = timm.list_models('*ran*')
pprint(model_names)
# model = timm.create_model('vit_base_patch16_224', pretrained=True)
# model.eval()
#
# config = resolve_data_config({}, model=model)
# transform = create_transform(**config)
#
# # url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
# # urllib.request.urlretrieve(url, filename)
#
# filename = "./cat.jpg"
# img = Image.open(filename).convert('RGB')
# tensor = transform(img).unsqueeze(0) # transform and add batch dimension
#
# with torch.no_grad():
# out = model(tensor)
# probabilities = torch.nn.functional.softmax(out[0], dim=0)
# print(probabilities.shape)
#
# # Get imagenet class mappings
# # url, filename = (
# # "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
# # urllib.request.urlretrieve(url, filename)
#
# with open("./imagenet_classes.txt", "r") as f:
# categories = [s.strip() for s in f.readlines()]
#
# # Print top categories per image
# top5_prob, top5_catid = torch.topk(probabilities, 5)
# for i in range(top5_prob.size(0)):
# print(categories[top5_catid[i]], top5_prob[i].item())
| 34.978261
| 108
| 0.664388
|
1ef6d247ddb0003c4f526b324caee40322f221ff
| 26,247
|
py
|
Python
|
electrum_xazab/tests/test_blockchain.py
|
nunumichael/electrum-xazab
|
f128c765f451b418a418f9cd8b8e24fd8f66df74
|
[
"MIT"
] | null | null | null |
electrum_xazab/tests/test_blockchain.py
|
nunumichael/electrum-xazab
|
f128c765f451b418a418f9cd8b8e24fd8f66df74
|
[
"MIT"
] | null | null | null |
electrum_xazab/tests/test_blockchain.py
|
nunumichael/electrum-xazab
|
f128c765f451b418a418f9cd8b8e24fd8f66df74
|
[
"MIT"
] | 2
|
2021-05-23T23:38:56.000Z
|
2021-05-24T19:01:07.000Z
|
import shutil
import tempfile
import os
from electrum_xazab import constants, blockchain
from electrum_xazab.simple_config import SimpleConfig
from electrum_xazab.blockchain import Blockchain, deserialize_header, hash_header
from electrum_xazab.util import bh2u, bfh, make_dir
from . import ElectrumTestCase
class TestBlockchain(ElectrumTestCase):
HEADERS = {
'A': deserialize_header(bfh("010000000000000000000000000000000000000000000000000000000000000000000000c762a6567f3cc092f0684bb62b7e00a84890b990f07cc71a6bb58d64b98e02e0 b9968054 ffff7f20 ffba1000"), 0),
'B': deserialize_header(bfh("000000202e3df23eec5cd6a86edd509539028e2c3a3dc05315eb28f2baa43218ca080000186c8dfd970a4545f79916bc1d75c9d00432f57c89209bf3bb115b7612848f509c25f45bffff7f2000000000"), 1),
'C': deserialize_header(bfh("000000200a8be74779a59fec4f56abd6ce33bf2a8a1e896b0290a2aba90cf8fa6e6a88f7bf2cbf153013a1c54abaf70e95198fcef2f3059cc6b4d0f7e876808e7d24d11cc825f45bffff7f2000000000"), 2),
'D': deserialize_header(bfh("000000204a030521422dda1f980cfc2b38149edd3d8eab547e6efa3ab855048feb68dbdae71019d7feecd9b8596eca9a67032c5f4641b23b5d731dc393e37de7f9c2f299e725f45bffff7f2000000000"), 3),
'E': deserialize_header(bfh("00000020e39959c005b364248b24a17a72fcfe89d8478c71645b85edd444031ef5e5f896a3586da94c71753f27c075f57f44faf913c31177a0957bbda42e7699e3a2141aed25f45bffff7f2001000000"), 4),
'F': deserialize_header(bfh("00000020d02b1711b7bc72feb7b3e599e9f9bb67f163c95203a64f6dcd4f6176c15d31437aee1d692d1615c3bdf52c291032144ce9e3b258a473c17c745047f3431ff8e2ee25f45bffff7f2000000000"), 5),
'O': deserialize_header(bfh("00000020ed0bfee047765d7f4233106a13b4ff6d6c67f7ef9aec0e7466759f00ea74b2613a141ce635cbb1cd2b3a4fcdd0a3380517845ba41736c82a79cab535d31128066526f45bffff7f2001000000"), 6),
'P': deserialize_header(bfh("000000201f9b9f1e295fd4eda90b03b62a676f93642d28c258d8222a2e9d5f0c75cae0a99690c2fe7c1a4450c74dc908fe94dd96c3b0637d51475e9e06a78e944a0c7fe28126f45bffff7f2000000000"), 7),
'Q': deserialize_header(bfh("000000200076268f577977b9e7386f68a9c3c332aa613d27243abd8167a1bd891adf404f148be228a4c3f2061bafe7efdfc4a8d5a94759464b9b5c619994d45dfcaf49e1a126f45bffff7f2000000000"), 8),
'R': deserialize_header(bfh("000000208cfac7d4caa975c6b7fe770a8ea35a77a02f6e9b1900bae67a389619095c757515681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a462555822552221a626f45bffff7f2000000000"), 9),
'S': deserialize_header(bfh("00000020936defed88e60da5cef2106338ef9ec221d65e9226f1fc29ec76e4b7c34a649c9dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548fab26f45bffff7f2000000000"), 10),
'T': deserialize_header(bfh("00000020372528176ba7c014b6f388ba338c7a87a5c50bc4d8a1a1d5900cbf5725e6822903b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe064b026f45bffff7f2002000000"), 11),
'U': deserialize_header(bfh("00000020c5a999182175cb571c7a15a08b8577e21b67c156a2c0ceebcce0d897e664fc3ad67cb902a7d807cee7676cb543feec3e053aa824d5dfb528d5b94f9760313d9db726f45bffff7f2001000000"), 12),
'G': deserialize_header(bfh("00000020ed0bfee047765d7f4233106a13b4ff6d6c67f7ef9aec0e7466759f00ea74b2613a141ce635cbb1cd2b3a4fcdd0a3380517845ba41736c82a79cab535d31128066928f45bffff7f2001000000"), 6),
'H': deserialize_header(bfh("00000020f8ca2216e002361e7cc1dd3e1197443e0b8068adaeec43d14be0e4f2159659e39690c2fe7c1a4450c74dc908fe94dd96c3b0637d51475e9e06a78e944a0c7fe26a28f45bffff7f2002000000"), 7),
'I': deserialize_header(bfh("00000020996b8b880bfe34b81dda59ae28ee28625a4dff565f671540a4703ebabd0ab991148be228a4c3f2061bafe7efdfc4a8d5a94759464b9b5c619994d45dfcaf49e16a28f45bffff7f2000000000"), 8),
'J': deserialize_header(bfh("000000201d5a4dfeeda94c6e4c3e40ce5c30df07e8103dba70cbce9d6b0890405c76b06715681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a462555822552221c928f45bffff7f2000000000"), 9),
'K': deserialize_header(bfh("00000020f93c46944a529187faae721951e66e187a0e910104e91ec8d1d4a914cadd79a89dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548fca28f45bffff7f2000000000"), 10),
'L': deserialize_header(bfh("00000020d76bdf59ed1ce4a4a31aa7649f8a39da2b956515f3bdb78b2bcdaaed60444bad03b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe064ca28f45bffff7f2000000000"), 11),
'M': deserialize_header(bfh("000000201d5a4dfeeda94c6e4c3e40ce5c30df07e8103dba70cbce9d6b0890405c76b06715681cb2d00ff889193f6a68a93f5096aeb2d84ca0af6185a4625558225522214229f45bffff7f2000000000"), 9),
'N': deserialize_header(bfh("00000020ff8ef64ad77c7c02103127be41dc39dda5f4dd17cbbaa7475fa8b7a3dd110ee19dc087fc977b06c24a69c682d1afd1020e6dc1f087571ccec66310a786e1548f4329f45bffff7f2003000000"), 10),
'X': deserialize_header(bfh("000000202857b96792f630a80f7c834afd5985b833794037930c1fe655c23b6eb769c85203b243756c25053253aeda309604363460a3911015929e68705bd89dff6fe0649b29f45bffff7f2002000000"), 11),
'Y': deserialize_header(bfh("000000206cc9a0dec93cffaab358ef9bd06fa0137d53e37a4b251f57da831ef31fccf9f2d67cb902a7d807cee7676cb543feec3e053aa824d5dfb528d5b94f9760313d9d9b29f45bffff7f2000000000"), 12),
'Z': deserialize_header(bfh("00000020756a6bfe58694141de4abf3317bccfa105b5ec30b997dda15a9ab02a9d86eba00f2596c29203f8a0f71ae94193092dc8f113be3dbee4579f1e649fa3d6dcc38c622ef45bffff7f2003000000"), 13),
}
# tree of headers:
# - M <- N <- X <- Y <- Z
# /
# - G <- H <- I <- J <- K <- L
# /
# A <- B <- C <- D <- E <- F <- O <- P <- Q <- R <- S <- T <- U
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_regtest()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
def setUp(self):
super().setUp()
self.data_dir = self.electrum_path
make_dir(os.path.join(self.data_dir, 'forks'))
self.config = SimpleConfig({'electrum_path': self.data_dir})
blockchain.blockchains = {}
def _append_header(self, chain: Blockchain, header: dict):
self.assertTrue(chain.can_connect(header))
chain.save_header(header)
def test_get_height_of_last_common_block_with_chain(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
self._append_header(chain_l, self.HEADERS['K'])
self._append_header(chain_l, self.HEADERS['L'])
self.assertEqual({chain_u: 8, chain_l: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11}, chain_l.get_parent_heights())
chain_z = chain_l.fork(self.HEADERS['M'])
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self._append_header(chain_z, self.HEADERS['Y'])
self._append_header(chain_z, self.HEADERS['Z'])
self.assertEqual({chain_u: 8, chain_z: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11, chain_z: 8}, chain_l.get_parent_heights())
self.assertEqual({chain_z: 13}, chain_z.get_parent_heights())
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_l))
self.assertEqual(5, chain_l.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(5, chain_z.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(8, chain_l.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(8, chain_z.get_height_of_last_common_block_with_chain(chain_l))
self._append_header(chain_u, self.HEADERS['R'])
self._append_header(chain_u, self.HEADERS['S'])
self._append_header(chain_u, self.HEADERS['T'])
self._append_header(chain_u, self.HEADERS['U'])
self.assertEqual({chain_u: 12, chain_z: 5}, chain_u.get_parent_heights())
self.assertEqual({chain_l: 11, chain_z: 8}, chain_l.get_parent_heights())
self.assertEqual({chain_z: 13}, chain_z.get_parent_heights())
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_l))
self.assertEqual(5, chain_l.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(5, chain_u.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(5, chain_z.get_height_of_last_common_block_with_chain(chain_u))
self.assertEqual(8, chain_l.get_height_of_last_common_block_with_chain(chain_z))
self.assertEqual(8, chain_z.get_height_of_last_common_block_with_chain(chain_l))
def test_parents_after_forking(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
self.assertEqual(None, chain_u.parent)
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
self._append_header(chain_l, self.HEADERS['K'])
self._append_header(chain_l, self.HEADERS['L'])
self.assertEqual(None, chain_l.parent)
self.assertEqual(chain_l, chain_u.parent)
chain_z = chain_l.fork(self.HEADERS['M'])
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self._append_header(chain_z, self.HEADERS['Y'])
self._append_header(chain_z, self.HEADERS['Z'])
self.assertEqual(chain_z, chain_u.parent)
self.assertEqual(chain_z, chain_l.parent)
self.assertEqual(None, chain_z.parent)
self._append_header(chain_u, self.HEADERS['R'])
self._append_header(chain_u, self.HEADERS['S'])
self._append_header(chain_u, self.HEADERS['T'])
self._append_header(chain_u, self.HEADERS['U'])
self.assertEqual(chain_z, chain_u.parent)
self.assertEqual(chain_z, chain_l.parent)
self.assertEqual(None, chain_z.parent)
def test_forking_and_swapping(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
self._append_header(chain_u, self.HEADERS['R'])
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
# do checks
self.assertEqual(2, len(blockchain.blockchains))
self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
self.assertEqual(0, chain_u.forkpoint)
self.assertEqual(None, chain_u.parent)
self.assertEqual(constants.net.GENESIS, chain_u._forkpoint_hash)
self.assertEqual(None, chain_u._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_u.path())
self.assertEqual(10 * 80, os.stat(chain_u.path()).st_size)
self.assertEqual(6, chain_l.forkpoint)
self.assertEqual(chain_u, chain_l.parent)
self.assertEqual(hash_header(self.HEADERS['G']), chain_l._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['F']), chain_l._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_61b274ea009f7566740eec9aeff7676c6dffb4136a1033427f5d7647e0fe0bed_e3599615f2e4e04bd143ecaead68800b3e4497113eddc17c1e3602e01622caf8"), chain_l.path())
self.assertEqual(4 * 80, os.stat(chain_l.path()).st_size)
self._append_header(chain_l, self.HEADERS['K'])
# chains were swapped, do checks
self.assertEqual(2, len(blockchain.blockchains))
self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
self.assertEqual(6, chain_u.forkpoint)
self.assertEqual(chain_l, chain_u.parent)
self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_61b274ea009f7566740eec9aeff7676c6dffb4136a1033427f5d7647e0fe0bed_a9e0ca750c5f9d2e2a22d858c2282d64936f672ab6030ba9edd45f291e9f9b1f"), chain_u.path())
self.assertEqual(4 * 80, os.stat(chain_u.path()).st_size)
self.assertEqual(0, chain_l.forkpoint)
self.assertEqual(None, chain_l.parent)
self.assertEqual(constants.net.GENESIS, chain_l._forkpoint_hash)
self.assertEqual(None, chain_l._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_l.path())
self.assertEqual(11 * 80, os.stat(chain_l.path()).st_size)
for b in (chain_u, chain_l):
self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
self._append_header(chain_u, self.HEADERS['S'])
self._append_header(chain_u, self.HEADERS['T'])
self._append_header(chain_u, self.HEADERS['U'])
self._append_header(chain_l, self.HEADERS['L'])
chain_z = chain_l.fork(self.HEADERS['M'])
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self._append_header(chain_z, self.HEADERS['Y'])
self._append_header(chain_z, self.HEADERS['Z'])
# chain_z became best chain, do checks
self.assertEqual(3, len(blockchain.blockchains))
self.assertEqual(2, len(os.listdir(os.path.join(self.data_dir, "forks"))))
self.assertEqual(0, chain_z.forkpoint)
self.assertEqual(None, chain_z.parent)
self.assertEqual(constants.net.GENESIS, chain_z._forkpoint_hash)
self.assertEqual(None, chain_z._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_z.path())
self.assertEqual(14 * 80, os.stat(chain_z.path()).st_size)
self.assertEqual(9, chain_l.forkpoint)
self.assertEqual(chain_z, chain_l.parent)
self.assertEqual(hash_header(self.HEADERS['J']), chain_l._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['I']), chain_l._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_9_67b0765c4090086b9dcecb70ba3d10e807df305cce403e4c6e4ca9edfe4d5a1d_a879ddca14a9d4d1c81ee90401910e7a186ee6511972aefa8791524a94463cf9"), chain_l.path())
self.assertEqual(3 * 80, os.stat(chain_l.path()).st_size)
self.assertEqual(6, chain_u.forkpoint)
self.assertEqual(chain_z, chain_u.parent)
self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_61b274ea009f7566740eec9aeff7676c6dffb4136a1033427f5d7647e0fe0bed_a9e0ca750c5f9d2e2a22d858c2282d64936f672ab6030ba9edd45f291e9f9b1f"), chain_u.path())
self.assertEqual(7 * 80, os.stat(chain_u.path()).st_size)
for b in (chain_u, chain_l, chain_z):
self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
self.assertEqual(constants.net.GENESIS, chain_z.get_hash(0))
self.assertEqual(hash_header(self.HEADERS['F']), chain_z.get_hash(5))
self.assertEqual(hash_header(self.HEADERS['G']), chain_z.get_hash(6))
self.assertEqual(hash_header(self.HEADERS['I']), chain_z.get_hash(8))
self.assertEqual(hash_header(self.HEADERS['M']), chain_z.get_hash(9))
self.assertEqual(hash_header(self.HEADERS['Z']), chain_z.get_hash(13))
def test_doing_multiple_swaps_after_single_new_header(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
self._append_header(chain_u, self.HEADERS['R'])
self._append_header(chain_u, self.HEADERS['S'])
self.assertEqual(1, len(blockchain.blockchains))
self.assertEqual(0, len(os.listdir(os.path.join(self.data_dir, "forks"))))
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
self._append_header(chain_l, self.HEADERS['K'])
# now chain_u is best chain, but it's tied with chain_l
self.assertEqual(2, len(blockchain.blockchains))
self.assertEqual(1, len(os.listdir(os.path.join(self.data_dir, "forks"))))
chain_z = chain_l.fork(self.HEADERS['M'])
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self.assertEqual(3, len(blockchain.blockchains))
self.assertEqual(2, len(os.listdir(os.path.join(self.data_dir, "forks"))))
# chain_z became best chain, do checks
self.assertEqual(0, chain_z.forkpoint)
self.assertEqual(None, chain_z.parent)
self.assertEqual(constants.net.GENESIS, chain_z._forkpoint_hash)
self.assertEqual(None, chain_z._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "blockchain_headers"), chain_z.path())
self.assertEqual(12 * 80, os.stat(chain_z.path()).st_size)
self.assertEqual(9, chain_l.forkpoint)
self.assertEqual(chain_z, chain_l.parent)
self.assertEqual(hash_header(self.HEADERS['J']), chain_l._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['I']), chain_l._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_9_67b0765c4090086b9dcecb70ba3d10e807df305cce403e4c6e4ca9edfe4d5a1d_a879ddca14a9d4d1c81ee90401910e7a186ee6511972aefa8791524a94463cf9"), chain_l.path())
self.assertEqual(2 * 80, os.stat(chain_l.path()).st_size)
self.assertEqual(6, chain_u.forkpoint)
self.assertEqual(chain_z, chain_u.parent)
self.assertEqual(hash_header(self.HEADERS['O']), chain_u._forkpoint_hash)
self.assertEqual(hash_header(self.HEADERS['F']), chain_u._prev_hash)
self.assertEqual(os.path.join(self.data_dir, "forks", "fork2_6_61b274ea009f7566740eec9aeff7676c6dffb4136a1033427f5d7647e0fe0bed_a9e0ca750c5f9d2e2a22d858c2282d64936f672ab6030ba9edd45f291e9f9b1f"), chain_u.path())
self.assertEqual(5 * 80, os.stat(chain_u.path()).st_size)
self.assertEqual(constants.net.GENESIS, chain_z.get_hash(0))
self.assertEqual(hash_header(self.HEADERS['F']), chain_z.get_hash(5))
self.assertEqual(hash_header(self.HEADERS['G']), chain_z.get_hash(6))
self.assertEqual(hash_header(self.HEADERS['I']), chain_z.get_hash(8))
self.assertEqual(hash_header(self.HEADERS['M']), chain_z.get_hash(9))
self.assertEqual(hash_header(self.HEADERS['X']), chain_z.get_hash(11))
for b in (chain_u, chain_l, chain_z):
self.assertTrue(all([b.can_connect(b.read_header(i), False) for i in range(b.height())]))
def get_chains_that_contain_header_helper(self, header: dict):
height = header['block_height']
header_hash = hash_header(header)
return blockchain.get_chains_that_contain_header(height, header_hash)
def test_get_chains_that_contain_header(self):
blockchain.blockchains[constants.net.GENESIS] = chain_u = Blockchain(
config=self.config, forkpoint=0, parent=None,
forkpoint_hash=constants.net.GENESIS, prev_hash=None)
open(chain_u.path(), 'w+').close()
self._append_header(chain_u, self.HEADERS['A'])
self._append_header(chain_u, self.HEADERS['B'])
self._append_header(chain_u, self.HEADERS['C'])
self._append_header(chain_u, self.HEADERS['D'])
self._append_header(chain_u, self.HEADERS['E'])
self._append_header(chain_u, self.HEADERS['F'])
self._append_header(chain_u, self.HEADERS['O'])
self._append_header(chain_u, self.HEADERS['P'])
self._append_header(chain_u, self.HEADERS['Q'])
chain_l = chain_u.fork(self.HEADERS['G'])
self._append_header(chain_l, self.HEADERS['H'])
self._append_header(chain_l, self.HEADERS['I'])
self._append_header(chain_l, self.HEADERS['J'])
self._append_header(chain_l, self.HEADERS['K'])
self._append_header(chain_l, self.HEADERS['L'])
chain_z = chain_l.fork(self.HEADERS['M'])
self.assertEqual([chain_l, chain_z, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['A']))
self.assertEqual([chain_l, chain_z, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['C']))
self.assertEqual([chain_l, chain_z, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['F']))
self.assertEqual([chain_l, chain_z], self.get_chains_that_contain_header_helper(self.HEADERS['G']))
self.assertEqual([chain_l, chain_z], self.get_chains_that_contain_header_helper(self.HEADERS['I']))
self.assertEqual([chain_z], self.get_chains_that_contain_header_helper(self.HEADERS['M']))
self.assertEqual([chain_l], self.get_chains_that_contain_header_helper(self.HEADERS['K']))
self._append_header(chain_z, self.HEADERS['N'])
self._append_header(chain_z, self.HEADERS['X'])
self._append_header(chain_z, self.HEADERS['Y'])
self._append_header(chain_z, self.HEADERS['Z'])
self.assertEqual([chain_z, chain_l, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['A']))
self.assertEqual([chain_z, chain_l, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['C']))
self.assertEqual([chain_z, chain_l, chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['F']))
self.assertEqual([chain_u], self.get_chains_that_contain_header_helper(self.HEADERS['O']))
self.assertEqual([chain_z, chain_l], self.get_chains_that_contain_header_helper(self.HEADERS['I']))
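# Checks Blockchain.verify_header() against expected-hash, prev-hash, target and proof-of-work mismatches.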
class TestVerifyHeader(ElectrumTestCase):
    # Data for block header #1296288.
valid_header = "0000002005128ad0cce5c4b9563a6641d2e089bca9f713ff1f8cff271600000000000000f034f3e8d65d3dc002bc0f1bba334ba6fa2bcd4fa4d322f1921768260d2ee380e1dafa5e1ecb17194a38bac2"
target = Blockchain.bits_to_target(420989726)
prev_hash = "000000000000001627ff8c1fff13f7a9bc89e0d241663a56b9c4e5ccd08a1205"
def setUp(self):
super().setUp()
self.header = deserialize_header(bfh(self.valid_header), 1296288)
def test_valid_header(self):
Blockchain.verify_header(self.header, self.prev_hash, self.target)
def test_expected_hash_mismatch(self):
with self.assertRaises(Exception):
Blockchain.verify_header(self.header, self.prev_hash, self.target,
expected_header_hash="foo")
def test_prev_hash_mismatch(self):
with self.assertRaises(Exception):
Blockchain.verify_header(self.header, "foo", self.target)
def test_target_mismatch(self):
with self.assertRaises(Exception):
other_target = Blockchain.bits_to_target(0x1d00eeee)
Blockchain.verify_header(self.header, self.prev_hash, other_target)
def test_insufficient_pow(self):
with self.assertRaises(Exception):
self.header["nonce"] = 42
Blockchain.verify_header(self.header, self.prev_hash, self.target)
| 62.492857
| 219
| 0.730102
|
b188bf6ac102b56ef29e02656757aa71a4298d07
| 810
|
py
|
Python
|
crop_images.py
|
ashok-arjun/Gaussian-Poisson-GANs-For-Image-Blending
|
878374ae7b11e41c8eebb9cc6281cdda33506c22
|
[
"MIT"
] | 1
|
2021-03-15T13:44:04.000Z
|
2021-03-15T13:44:04.000Z
|
crop_images.py
|
aiarjun/Gaussian-Poisson-GANs-For-Image-Blending
|
7eeec778c6b14df32588c320609ac50004add0a6
|
[
"MIT"
] | 1
|
2022-01-13T03:54:13.000Z
|
2022-01-13T03:54:13.000Z
|
crop_images.py
|
aiarjun/Gaussian-Poisson-GANs-For-Image-Blending
|
7eeec778c6b14df32588c320609ac50004add0a6
|
[
"MIT"
] | null | null | null |
import glob
import os
from skimage.io import imread, imsave
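# Crop all images in each directory listed in data/bbox.txt to the given bounding box and save them under result_dir, preserving the directory layout.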
def crop_images(data_dir, result_dir):
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
print('Cropped images will be saved to {} ...\n'.format(result_dir))
with open('data/bbox.txt') as f:
for line in f:
name, bbox = line.strip().split(':')
sx, sy, ex, ey = [int(i) for i in bbox.split(',')]
print('Processing {} ...'.format(name))
images = glob.glob(os.path.join(data_dir, name, '*'))
if not os.path.isdir(os.path.join(result_dir, name)):
os.makedirs(os.path.join(result_dir, name))
for image in images:
full_image = imread(image)
cropped_image = full_image[sx:ex, sy:ey]
imsave(os.path.join(result_dir, name, os.path.basename(image)), cropped_image)
| 31.153846
| 86
| 0.64321
|
498be439632585c12fbf624e5449fa255871d8af
| 611
|
py
|
Python
|
modules/jokes.py
|
hasibulkabir/friday-bot
|
6d7b0441baeb295029570d96b523b2603b92925e
|
[
"MIT"
] | 5
|
2017-07-15T07:27:47.000Z
|
2021-01-27T12:29:37.000Z
|
modules/jokes.py
|
hasibulkabir/friday-bot
|
6d7b0441baeb295029570d96b523b2603b92925e
|
[
"MIT"
] | null | null | null |
modules/jokes.py
|
hasibulkabir/friday-bot
|
6d7b0441baeb295029570d96b523b2603b92925e
|
[
"MIT"
] | 4
|
2017-01-27T01:25:18.000Z
|
2020-10-04T08:03:12.000Z
|
import requests, random
from bs4 import BeautifulSoup as BS
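# Scrape a random joke from santabanta.com (a random page out of ~1050) and return its text.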
def randomJoke():
jokeUrl = "http://www.santabanta.com/jokes/?page=" + str(random.randint(1, 1050))
res = requests.get(jokeUrl)
soup = BS(res.text, 'html.parser')
result = soup.find_all('span', {'class': 'sms_text'})
return random.choice(result).text
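# Download a random "Be like Bill" meme image and save it locally as meme.jpg.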
def randomMeme():
memeUrl = "http://belikebill.azurewebsites.net/billgen-API.php?default=1"
res = requests.get(memeUrl)
file = open("meme.jpg" , "wb")
for i in res.iter_content(1000):
file.write(i)
file.close()
return "OK"
| 29.095238
| 86
| 0.635025
|
3e583de73c9370ddbb954bedc9ff2b67308d306e
| 1,028
|
py
|
Python
|
semseg/augmentations/__init__.py
|
ManuelFritsche/flow-consistency
|
90625fe25855aa11c6245ca242ab8d66c41f4726
|
[
"MIT"
] | 4
|
2020-06-14T00:35:49.000Z
|
2021-09-02T11:08:47.000Z
|
semseg/augmentations/__init__.py
|
ManuelFritsche/FlowConsistency
|
90625fe25855aa11c6245ca242ab8d66c41f4726
|
[
"MIT"
] | 1
|
2019-12-05T20:06:14.000Z
|
2020-01-05T15:06:55.000Z
|
semseg/augmentations/__init__.py
|
ManuelFritsche/flow-consistency
|
90625fe25855aa11c6245ca242ab8d66c41f4726
|
[
"MIT"
] | 2
|
2019-01-26T03:16:25.000Z
|
2019-02-25T22:52:34.000Z
|
import logging
from semseg.augmentations.augmentations import *
logger = logging.getLogger('semseg')
key2aug = {'gamma': AdjustGamma,
'hue': AdjustHue,
'brightness': AdjustBrightness,
'saturation': AdjustSaturation,
'contrast': AdjustContrast,
'rcrop': RandomCrop,
'hflip': RandomHorizontallyFlip,
'vflip': RandomVerticallyFlip,
'scale': Scale,
'rsize': RandomSized,
'rsizecrop': RandomSizedCrop,
'rotate': RandomRotate,
'translate': RandomTranslate,
'ccrop': CenterCrop,}
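# Build a Compose pipeline from a {augmentation_name: parameter} mapping; returns None when no augmentations are configured.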
def get_composed_augmentations(aug_dict):
if aug_dict is None:
logger.info("Using No Augmentations")
return None
augmentations = []
for aug_key, aug_param in aug_dict.items():
augmentations.append(key2aug[aug_key](aug_param))
logger.info("Using {} aug with params {}".format(aug_key, aug_param))
return Compose(augmentations)
| 31.151515
| 78
| 0.607004
|
370c334ec2653852501df7092dbc14a647c31abb
| 8,937
|
py
|
Python
|
cca.py
|
petr-bauch/cca
|
6011c602e1184fff4fc9a9b880cb59a070746929
|
[
"Apache-2.0"
] | 4
|
2021-03-26T01:43:06.000Z
|
2022-02-22T13:16:26.000Z
|
cca.py
|
petr-bauch/cca
|
6011c602e1184fff4fc9a9b880cb59a070746929
|
[
"Apache-2.0"
] | 2
|
2020-10-25T07:44:54.000Z
|
2021-03-28T08:15:14.000Z
|
cca.py
|
petr-bauch/cca
|
6011c602e1184fff4fc9a9b880cb59a070746929
|
[
"Apache-2.0"
] | 3
|
2017-02-28T02:57:32.000Z
|
2022-02-09T07:01:11.000Z
|
#!/usr/bin/env python3
'''
A driver script for the CCA container image
Copyright 2021 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
import time
import shutil
from datetime import datetime, timedelta
from subprocess import Popen, run
from threading import Thread
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
IMAGE_NAME = 'codinuum/cca'
#IMAGE_NAME = 'ccax'
#
CCA_HOME = '/opt/cca'
CCA_VAR = '/var/lib/cca'
CCA_LOG_DIR = '/var/log/cca'
CCA_SOURCE_DIR = CCA_VAR+'/source'
CCA_CACHE_DIR = CCA_VAR+'/cache'
CCA_WORK_DIR_NAME = '__CCA__'
CONTAINER_CMD = 'docker'
TIMEOUT = 5
BUFSIZE = 0 # unbuffered
STAT_NAME = 'status'
DEFAULT_CACHE_DIR = os.path.join(os.environ['HOME'], '.cca', 'cache')
#WIN_HOST_FLAG = sys.platform.startswith('win')
### timezone
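# Derive the host's timezone name and UTC offset so they can be passed to the container via the TZ environment variable.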
TZ = None
if time.timezone != 0:
SIGN = '+' if time.timezone > 0 else '-'
STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
DSTOFFSET = timedelta(seconds=-time.altzone)
else:
DSTOFFSET = STDOFFSET
dt = datetime.now()
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
isdst = tt.tm_isdst > 0
tzname = None
offset = 0
if isdst:
tzname = time.tzname[1]
offset = DSTOFFSET
else:
tzname = time.tzname[0]
offset = STDOFFSET
TZ = '{}{}{}'.format(tzname, SIGN, offset)
###
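# Poll the status file written inside the container and print its contents whenever it changes, until the process exits.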
def progress(proc, stat_path, timeout=TIMEOUT):
stat_mtime = None
print('\nMonitoring thread started.')
while True:
try:
st = os.stat(stat_path)
if st.st_mtime != stat_mtime and st.st_size > 0:
with open(stat_path, 'r') as f:
mes = f.read()
print('[{}]'.format(mes))
stat_mtime = st.st_mtime
except OSError as e:
pass
if proc.poll() is not None:
break
proc.wait()
if proc.returncode > 0:
print('Execution failed: {}'.format(proc.returncode))
def ensure_dir(dpath):
if not os.path.isdir(dpath):
try:
os.makedirs(dpath)
except Exception as e:
raise
def get_image_name(image_name, devel=False):
suffix = ''
if devel:
suffix = ':devel'
image = image_name+suffix
return image
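# Run the Diff/AST comparison inside the CCA container, mounting the sources' common parent directory and the result cache as volumes; optionally launch DiffViewer on the results.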
def run_diffast(container_cmd, original, modified, cache=DEFAULT_CACHE_DIR, clear_cache=False, view=False,
dry_run=False, devel=False, image=IMAGE_NAME, verbose=False, debug=False):
if dry_run:
verbose = True
original = os.path.abspath(original)
modified = os.path.abspath(modified)
cache = os.path.abspath(cache)
if not dry_run:
ensure_dir(cache)
cca_cmd_path = '{}/bin/{}.opt'.format(CCA_HOME, 'diffast')
cca_cmd = cca_cmd_path
if clear_cache:
cca_cmd += ' -clearcache'
cca_cmd += ' -cache {}'.format(CCA_CACHE_DIR)
orig_dir = os.path.dirname(original)
mod_dir = os.path.dirname(modified)
common_path = os.path.commonpath([orig_dir, mod_dir])
orig_path = CCA_SOURCE_DIR+'/'+os.path.relpath(original, start=common_path)
mod_path = CCA_SOURCE_DIR+'/'+os.path.relpath(modified, start=common_path)
cca_cmd += ' {} {}'.format(orig_path, mod_path)
vol_opt = '-v "{}:{}"'.format(common_path, CCA_SOURCE_DIR)
vol_opt += ' -v "{}:{}"'.format(cache, CCA_CACHE_DIR)
run_cmd = '{} run'.format(container_cmd)
run_cmd += ' --rm'
run_cmd += ' -t'
if TZ:
run_cmd += ' -e "TZ={}"'.format(TZ)
run_cmd += ' {}'.format(vol_opt)
run_cmd += ' {} {}'.format(get_image_name(image, devel=devel), cca_cmd)
if verbose:
print(run_cmd)
if not dry_run:
try:
rc = run(run_cmd, bufsize=BUFSIZE, shell=True, universal_newlines=True)
if view:
app_path = os.path.join(os.path.dirname(sys.argv[0]),
'diffviewer',
'DiffViewer-darwin-x64',
'DiffViewer.app')
if os.path.exists(app_path):
cache_opt = ' --cache {}'.format(cache)
files_opt = ' --file0 {} --file1 {}'.format(original, modified)
view_cmd = 'open -n {} --args{}{}'.format(app_path, cache_opt, files_opt)
if verbose:
print(view_cmd)
rc = run(view_cmd, shell=True)
else:
print('DiffViewer not found. See diffviewer/README.md.')
except (KeyboardInterrupt, SystemExit):
print('Interrupted.')
except OSError as e:
print('Execution failed: {}'.format(e))
def gen_work_dir_name():
dt = datetime.now()
ts = '{:04}{:02}{:02}{:02}{:02}{:02}'.format(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
dn = '{}{}'.format(CCA_WORK_DIR_NAME, ts)
return dn
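# Pull the latest CCA container image.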
def update(args):
cmd = '{} pull {}'.format(args.container_cmd, get_image_name(args.image, devel=args.devel))
if args.verbose or args.dry_run:
print(cmd)
if not args.dry_run:
try:
run(cmd, shell=True)
except OSError as e:
print('Execution failed: {}'.format(e))
def diffast(args):
run_diffast(args.container_cmd,
args.original, args.modified, cache=args.cache, clear_cache=args.force, view=args.view,
dry_run=args.dry_run, devel=args.devel, image=args.image, verbose=args.verbose, debug=args.debug)
def main():
parser = ArgumentParser(description='A CCA driver',
add_help=False,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true',
help='only print container commands')
parser.add_argument('--container-command', dest='container_cmd', metavar='CMD',
help='specify container command', default=CONTAINER_CMD)
parser.add_argument('-i', '--image', dest='image', type=str, metavar='IMAGE', default=IMAGE_NAME,
help='specify container image')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='enable verbose printing')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='enable debug printing')
parser.add_argument('-x', '--experimental', dest='devel', action='store_true',
help='use experimental image')
p = ArgumentParser(add_help=True)
subparsers = p.add_subparsers(title='subcommands')
# Docker image update
parser_update = subparsers.add_parser('update',
description='Update docker image of CCA',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_update.set_defaults(func=update)
# Diff/AST
parser_diffast = subparsers.add_parser('diffast',
description='Compare two programs',
parents=[parser],
formatter_class=ArgumentDefaultsHelpFormatter)
parser_diffast.add_argument('original', type=str, metavar='ORIGINAL', help='original source file')
parser_diffast.add_argument('modified', type=str, metavar='MODIFIED', help='modified source file')
parser_diffast.add_argument('--view', dest='view', action='store_true',
help='launch DiffViewer after comparison')
parser_diffast.add_argument('-f', '--force', dest='force', action='store_true',
help='force comparison (overwrite cache)')
parser_diffast.add_argument('-c', '--cache', dest='cache', default=DEFAULT_CACHE_DIR,
metavar='DIR', type=str, help='result cache directory')
parser_diffast.set_defaults(func=diffast)
#
args = p.parse_args()
try:
args.func(args)
except:
#raise
p.print_help()
if __name__ == '__main__':
main()
| 30.294915
| 113
| 0.594047
|
1c9f4ca5c9d9a2927f4fd03270feb5d92cad4892
| 19,528
|
py
|
Python
|
mkt/submit/forms.py
|
chrisdavidmills/zamboni
|
09e05bad2570663d25408793289c81324d3e952e
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/submit/forms.py
|
chrisdavidmills/zamboni
|
09e05bad2570663d25408793289c81324d3e952e
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/submit/forms.py
|
chrisdavidmills/zamboni
|
09e05bad2570663d25408793289c81324d3e952e
|
[
"BSD-3-Clause"
] | 1
|
2021-03-13T00:33:12.000Z
|
2021-03-13T00:33:12.000Z
|
import datetime
from collections import defaultdict
from django import forms
import happyforms
import waffle
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from addons.models import Addon, AddonUpsell, BlacklistedSlug, Webapp
from amo.utils import slug_validator
from apps.users.models import UserNotification
from apps.users.notifications import app_surveys
from editors.models import RereviewQueue
from files.models import FileUpload
from files.utils import parse_addon
from market.models import AddonPremium, Price
from translations.fields import TransField
from translations.forms import TranslationFormMixin
from translations.widgets import TransInput, TransTextarea
from mkt.constants import APP_FEATURES, FREE_PLATFORMS, PAID_PLATFORMS
from mkt.site.forms import AddonChoiceField, APP_PUBLIC_CHOICES
from mkt.webapps.models import AppFeatures
from mkt.developers.forms import verify_app_domain
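# Flag an app for re-review when its set of supported device types changes.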
def mark_for_rereview(addon, added_devices, removed_devices):
msg = _(u'Device(s) changed: {0}').format(', '.join(
[_(u'Added {0}').format(unicode(amo.DEVICE_TYPES[d].name))
for d in added_devices] +
[_(u'Removed {0}').format(unicode(amo.DEVICE_TYPES[d].name))
for d in removed_devices]))
RereviewQueue.flag(addon, amo.LOG.REREVIEW_DEVICES_ADDED, msg)
def mark_for_rereview_features_change(addon, added_features, removed_features):
# L10n: {0} is the list of requirements changes.
msg = _(u'Requirements changed: {0}').format(', '.join(
[_(u'Added {0}').format(f) for f in added_features] +
[_(u'Removed {0}').format(f) for f in removed_features]))
RereviewQueue.flag(addon, amo.LOG.REREVIEW_FEATURES_CHANGED, msg)
class DeviceTypeForm(happyforms.Form):
ERRORS = {
'both': _lazy(u'Cannot be free and paid.'),
'none': _lazy(u'Please select a device.'),
'packaged': _lazy(u'Packaged apps are not yet supported for those '
u'platforms.'),
}
free_platforms = forms.MultipleChoiceField(
choices=FREE_PLATFORMS(), required=False)
paid_platforms = forms.MultipleChoiceField(
choices=PAID_PLATFORMS(), required=False)
def save(self, addon, is_paid):
data = self.cleaned_data[
'paid_platforms' if is_paid else 'free_platforms']
submitted_data = self.get_devices(t.split('-', 1)[1] for t in data)
new_types = set(dev.id for dev in submitted_data)
old_types = set(amo.DEVICE_TYPES[x.id].id for x in addon.device_types)
added_devices = new_types - old_types
removed_devices = old_types - new_types
for d in added_devices:
addon.addondevicetype_set.create(device_type=d)
for d in removed_devices:
addon.addondevicetype_set.filter(device_type=d).delete()
# Send app to re-review queue if public and new devices are added.
if added_devices and addon.status in amo.WEBAPPS_APPROVED_STATUSES:
mark_for_rereview(addon, added_devices, removed_devices)
def _add_error(self, msg):
self._errors['free_platforms'] = self._errors['paid_platforms'] = (
self.ERRORS[msg])
def _get_combined(self):
devices = (self.cleaned_data.get('free_platforms', []) +
self.cleaned_data.get('paid_platforms', []))
return set(d.split('-', 1)[1] for d in devices)
def _set_packaged_errors(self):
"""Add packaged-app submission errors for incompatible platforms."""
devices = self._get_combined()
bad_android = (
not waffle.flag_is_active(self.request, 'android-packaged') and
('android-mobile' in devices or 'android-tablet' in devices)
)
bad_desktop = (
not waffle.flag_is_active(self.request, 'desktop-packaged') and
'desktop' in devices
)
if bad_android or bad_desktop:
self._errors['free_platforms'] = self._errors['paid_platforms'] = (
self.ERRORS['packaged'])
def clean(self):
data = self.cleaned_data
paid = data.get('paid_platforms', [])
free = data.get('free_platforms', [])
# Check that they didn't select both.
if free and paid:
self._add_error('both')
return data
# Check that they selected one.
if not free and not paid:
self._add_error('none')
return data
return super(DeviceTypeForm, self).clean()
def get_devices(self, source=None):
"""Returns a device based on the requested free or paid."""
if source is None:
source = self._get_combined()
platforms = {'firefoxos': amo.DEVICE_GAIA,
'desktop': amo.DEVICE_DESKTOP,
'android-mobile': amo.DEVICE_MOBILE,
'android-tablet': amo.DEVICE_TABLET}
return map(platforms.get, source)
def is_paid(self):
return bool(self.cleaned_data.get('paid_platforms', False))
def get_paid(self):
"""Returns the premium type. Should not be used if the form is used to
modify an existing app.
"""
return amo.ADDON_PREMIUM if self.is_paid() else amo.ADDON_FREE
class DevAgreementForm(happyforms.Form):
read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree and Continue'),
widget=forms.HiddenInput)
newsletter = forms.BooleanField(required=False, label=app_surveys.label,
widget=forms.CheckboxInput)
def __init__(self, *args, **kw):
self.instance = kw.pop('instance')
super(DevAgreementForm, self).__init__(*args, **kw)
def save(self):
self.instance.read_dev_agreement = datetime.datetime.now()
self.instance.save()
if self.cleaned_data.get('newsletter'):
UserNotification.update_or_create(user=self.instance,
notification_id=app_surveys.id, update={'enabled': True})
class NewWebappVersionForm(happyforms.Form):
upload_error = _lazy(u'There was an error with your upload. '
u'Please try again.')
upload = forms.ModelChoiceField(widget=forms.HiddenInput,
queryset=FileUpload.objects.filter(valid=True),
error_messages={'invalid_choice': upload_error})
def __init__(self, *args, **kw):
request = kw.pop('request', None)
self.addon = kw.pop('addon', None)
self._is_packaged = kw.pop('is_packaged', False)
super(NewWebappVersionForm, self).__init__(*args, **kw)
if (not waffle.flag_is_active(request, 'allow-b2g-paid-submission')
and 'paid_platforms' in self.fields):
del self.fields['paid_platforms']
def clean(self):
data = self.cleaned_data
if 'upload' not in self.cleaned_data:
self._errors['upload'] = self.upload_error
return
if self.is_packaged():
# Now run the packaged app check, done in clean, because
# clean_packaged needs to be processed first.
try:
pkg = parse_addon(data['upload'], self.addon)
except forms.ValidationError, e:
self._errors['upload'] = self.error_class(e.messages)
return
ver = pkg.get('version')
if (ver and self.addon and
self.addon.versions.filter(version=ver).exists()):
self._errors['upload'] = _(u'Version %s already exists') % ver
return
origin = pkg.get('origin')
if origin:
try:
origin = verify_app_domain(origin, packaged=True,
exclude=self.addon)
except forms.ValidationError, e:
self._errors['upload'] = self.error_class(e.messages)
return
if origin:
data['origin'] = origin
else:
# Throw an error if this is a dupe.
# (JS sets manifest as `upload.name`.)
try:
verify_app_domain(data['upload'].name)
except forms.ValidationError, e:
self._errors['upload'] = self.error_class(e.messages)
return
return data
def is_packaged(self):
return self._is_packaged
class NewWebappForm(DeviceTypeForm, NewWebappVersionForm):
upload = forms.ModelChoiceField(widget=forms.HiddenInput,
queryset=FileUpload.objects.filter(valid=True),
error_messages={'invalid_choice': _lazy(
u'There was an error with your upload. Please try again.')})
packaged = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(NewWebappForm, self).__init__(*args, **kwargs)
if 'paid_platforms' in self.fields:
self.fields['paid_platforms'].choices = PAID_PLATFORMS(
self.request)
def _add_error(self, msg):
self._errors['free_platforms'] = self._errors['paid_platforms'] = (
self.ERRORS[msg])
def clean(self):
data = super(NewWebappForm, self).clean()
if not data:
return
if self.is_packaged():
self._set_packaged_errors()
if self._errors.get('free_platforms'):
return
return data
def is_packaged(self):
return self._is_packaged or self.cleaned_data.get('packaged', False)
class PaypalSetupForm(happyforms.Form):
business_account = forms.ChoiceField(widget=forms.RadioSelect, choices=[],
label=_lazy(u'Do you already have a PayPal Premier '
'or Business account?'))
email = forms.EmailField(required=False,
label=_lazy(u'PayPal email address'))
def __init__(self, *args, **kw):
super(PaypalSetupForm, self).__init__(*args, **kw)
self.fields['business_account'].choices = (('yes', _lazy('Yes')),
('no', _lazy('No')),
('later', _lazy(u"I'll link my PayPal account later.")))
def clean(self):
data = self.cleaned_data
if data.get('business_account') == 'yes' and not data.get('email'):
msg = _(u'The PayPal email is required.')
self._errors['email'] = self.error_class([msg])
return data
class UpsellForm(happyforms.Form):
price = forms.ModelChoiceField(queryset=Price.objects.active(),
label=_lazy(u'App Price'),
empty_label=None,
required=True)
make_public = forms.TypedChoiceField(choices=APP_PUBLIC_CHOICES,
widget=forms.RadioSelect(),
label=_lazy(u'When should your app be '
'made available for sale?'),
coerce=int,
required=False)
free = AddonChoiceField(queryset=Addon.objects.none(),
required=False, empty_label='',
# L10n: "App" is a paid version of this app. "from" is this app.
label=_lazy(u'App to upgrade from'),
widget=forms.Select())
def __init__(self, *args, **kw):
self.extra = kw.pop('extra')
self.request = kw.pop('request')
self.addon = self.extra['addon']
if 'initial' not in kw:
kw['initial'] = {}
kw['initial']['make_public'] = amo.PUBLIC_IMMEDIATELY
if self.addon.premium:
kw['initial']['price'] = self.addon.premium.price
super(UpsellForm, self).__init__(*args, **kw)
self.fields['free'].queryset = (self.extra['amo_user'].addons
.exclude(pk=self.addon.pk)
.filter(premium_type__in=amo.ADDON_FREES,
status__in=amo.VALID_STATUSES,
type=self.addon.type))
if len(self.fields['price'].choices) > 1:
# Tier 0 (Free) should not be the default selection.
self.initial['price'] = (Price.objects.active()
.exclude(price='0.00')[0])
def clean_make_public(self):
return (amo.PUBLIC_WAIT if self.cleaned_data.get('make_public')
else None)
def save(self):
if 'price' in self.cleaned_data:
premium = self.addon.premium
if not premium:
premium = AddonPremium()
premium.addon = self.addon
premium.price = self.cleaned_data['price']
premium.save()
upsell = self.addon.upsold
if self.cleaned_data['free']:
# Check if this app was already a premium version for another app.
if upsell and upsell.free != self.cleaned_data['free']:
upsell.delete()
if not upsell:
upsell = AddonUpsell(premium=self.addon)
upsell.free = self.cleaned_data['free']
upsell.save()
elif upsell:
upsell.delete()
self.addon.update(make_public=self.cleaned_data['make_public'])
class AppDetailsBasicForm(TranslationFormMixin, happyforms.ModelForm):
"""Form for "Details" submission step."""
app_slug = forms.CharField(max_length=30,
widget=forms.TextInput(attrs={'class': 'm'}))
description = TransField(required=True,
label=_lazy(u'Description:'),
help_text=_lazy(u'This description will appear on the details page.'),
widget=TransTextarea(attrs={'rows': 4}))
privacy_policy = TransField(widget=TransTextarea(attrs={'rows': 6}),
label=_lazy(u'Privacy Policy:'),
help_text=_lazy(u"A privacy policy that explains what "
"data is transmitted from a user's computer and how "
"it is used is required."))
homepage = TransField.adapt(forms.URLField)(required=False,
verify_exists=False, label=_lazy(u'Homepage:'),
help_text=_lazy(u'If your app has another homepage, enter its address '
'here.'),
widget=TransInput(attrs={'class': 'full'}))
support_url = TransField.adapt(forms.URLField)(required=False,
verify_exists=False, label=_lazy(u'Support Website:'),
help_text=_lazy(u'If your app has a support website or forum, enter '
'its address here.'),
widget=TransInput(attrs={'class': 'full'}))
support_email = TransField.adapt(forms.EmailField)(
label=_lazy(u'Support Email:'),
help_text=_lazy(u'The email address used by end users to contact you '
'with support issues and refund requests.'),
widget=TransInput(attrs={'class': 'full'}))
flash = forms.TypedChoiceField(required=False,
coerce=lambda x: bool(int(x)),
label=_lazy(u'Does your app require Flash support?'),
initial=0,
choices=(
(1, _lazy(u'Yes')),
(0, _lazy(u'No')),
),
widget=forms.RadioSelect)
publish = forms.BooleanField(required=False, initial=1,
label=_lazy(u"Publish my app in the Firefox Marketplace as soon as "
"it's reviewed."),
help_text=_lazy(u"If selected your app will be published immediately "
"following its approval by reviewers. If you don't "
"select this option you will be notified via email "
"about your app's approval and you will need to log "
"in and manually publish it."))
class Meta:
model = Addon
fields = ('app_slug', 'description', 'privacy_policy', 'homepage',
'support_url', 'support_email')
def __init__(self, *args, **kw):
self.request = kw.pop('request')
kw.setdefault('initial', {})
# Prefill support email.
locale = self.base_fields['support_email'].default_locale.lower()
kw['initial']['support_email'] = {locale: self.request.amo_user.email}
super(AppDetailsBasicForm, self).__init__(*args, **kw)
def clean_app_slug(self):
slug = self.cleaned_data['app_slug']
slug_validator(slug, lower=False)
if slug != self.instance.app_slug:
if Webapp.objects.filter(app_slug=slug).exists():
raise forms.ValidationError(
_('This slug is already in use. Please choose another.'))
if BlacklistedSlug.blocked(slug):
raise forms.ValidationError(
_('The slug cannot be "%s". Please choose another.'
% slug))
return slug.lower()
def save(self, *args, **kw):
uses_flash = self.cleaned_data.get('flash')
af = self.instance.get_latest_file()
if af is not None:
af.update(uses_flash=bool(uses_flash))
form = super(AppDetailsBasicForm, self).save(commit=False)
form.save()
return form
class AppFeaturesForm(happyforms.ModelForm):
class Meta:
exclude = ['version']
model = AppFeatures
def __init__(self, *args, **kwargs):
super(AppFeaturesForm, self).__init__(*args, **kwargs)
if self.instance:
self.initial_features = sorted(self.instance.to_keys())
else:
self.initial_features = None
def all_fields(self):
"""
        Materializes self.__iter__() into a plain list of the fields on the
        form. This allows further manipulation of fields: to display a subset
        of fields or order them in a specific way.
"""
return [f for f in self.__iter__()]
def required_api_fields(self):
"""
All fields on the form, alphabetically sorted by help text.
"""
return sorted(self.all_fields(), key=lambda x: x.help_text)
def get_tooltip(self, field):
field_id = field.name.split('_', 1)[1].upper()
return (unicode(APP_FEATURES[field_id].get('description') or '') if
field_id in APP_FEATURES else None)
def _changed_features(self):
old_features = defaultdict.fromkeys(self.initial_features, True)
old_features = set(unicode(f) for f
in AppFeatures(**old_features).to_list())
new_features = set(unicode(f) for f in self.instance.to_list())
added_features = new_features - old_features
removed_features = old_features - new_features
return added_features, removed_features
def save(self, *args, **kwargs):
mark_for_rereview = kwargs.pop('mark_for_rereview', True)
addon = self.instance.version.addon
rval = super(AppFeaturesForm, self).save(*args, **kwargs)
if (self.instance and mark_for_rereview and
addon.status in amo.WEBAPPS_APPROVED_STATUSES and
sorted(self.instance.to_keys()) != self.initial_features):
added_features, removed_features = self._changed_features()
mark_for_rereview_features_change(addon,
added_features,
removed_features)
return rval
| 39.450505
| 79
| 0.600881
|
855ff5f83e8cb57104b274efc4b444b9ae4f33b8
| 12,175
|
py
|
Python
|
mindhome_alpha/erpnext/projects/doctype/task/task.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
mindhome_alpha/erpnext/projects/doctype/task/task.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | null | null | null |
mindhome_alpha/erpnext/projects/doctype/task/task.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe import _, throw
from frappe.desk.form.assign_to import clear, close_all_assignments
from frappe.model.mapper import get_mapped_doc
from frappe.utils import add_days, cstr, date_diff, get_link_to_form, getdate, today, flt
from frappe.utils.nestedset import NestedSet
class CircularReferenceError(frappe.ValidationError): pass
class EndDateCannotBeGreaterThanProjectEndDateError(frappe.ValidationError): pass
class Task(NestedSet):
nsm_parent_field = 'parent_task'
def get_feed(self):
return '{0}: {1}'.format(_(self.status), self.subject)
def get_customer_details(self):
cust = frappe.db.sql("select customer_name from `tabCustomer` where name=%s", self.customer)
if cust:
ret = {'customer_name': cust and cust[0][0] or ''}
return ret
def validate(self):
self.validate_dates()
self.validate_parent_expected_end_date()
self.validate_parent_project_dates()
self.validate_progress()
self.validate_status()
self.update_depends_on()
self.validate_dependencies_for_template_task()
def validate_dates(self):
if self.exp_start_date and self.exp_end_date and getdate(self.exp_start_date) > getdate(self.exp_end_date):
frappe.throw(_("{0} can not be greater than {1}").format(frappe.bold("Expected Start Date"), \
frappe.bold("Expected End Date")))
if self.act_start_date and self.act_end_date and getdate(self.act_start_date) > getdate(self.act_end_date):
frappe.throw(_("{0} can not be greater than {1}").format(frappe.bold("Actual Start Date"), \
frappe.bold("Actual End Date")))
def validate_parent_expected_end_date(self):
if self.parent_task:
parent_exp_end_date = frappe.db.get_value("Task", self.parent_task, "exp_end_date")
if parent_exp_end_date and getdate(self.get("exp_end_date")) > getdate(parent_exp_end_date):
frappe.throw(_("Expected End Date should be less than or equal to parent task's Expected End Date {0}.").format(getdate(parent_exp_end_date)))
def validate_parent_project_dates(self):
if not self.project or frappe.flags.in_test:
return
expected_end_date = frappe.db.get_value("Project", self.project, "expected_end_date")
if expected_end_date:
validate_project_dates(getdate(expected_end_date), self, "exp_start_date", "exp_end_date", "Expected")
validate_project_dates(getdate(expected_end_date), self, "act_start_date", "act_end_date", "Actual")
def validate_status(self):
if self.is_template and self.status != "Template":
self.status = "Template"
if self.status!=self.get_db_value("status") and self.status == "Completed":
for d in self.depends_on:
if frappe.db.get_value("Task", d.task, "status") not in ("Completed", "Cancelled"):
frappe.throw(_("Cannot complete task {0} as its dependant task {1} are not ccompleted / cancelled.").format(frappe.bold(self.name), frappe.bold(d.task)))
close_all_assignments(self.doctype, self.name)
def validate_progress(self):
if flt(self.progress or 0) > 100:
frappe.throw(_("Progress % for a task cannot be more than 100."))
if flt(self.progress) == 100:
self.status = 'Completed'
if self.status == 'Completed':
self.progress = 100
def validate_dependencies_for_template_task(self):
if self.is_template:
self.validate_parent_template_task()
self.validate_depends_on_tasks()
def validate_parent_template_task(self):
if self.parent_task:
if not frappe.db.get_value("Task", self.parent_task, "is_template"):
parent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(self.parent_task)
frappe.throw(_("Parent Task {0} is not a Template Task").format(parent_task_format))
def validate_depends_on_tasks(self):
if self.depends_on:
for task in self.depends_on:
if not frappe.db.get_value("Task", task.task, "is_template"):
dependent_task_format = """<a href="#Form/Task/{0}">{0}</a>""".format(task.task)
frappe.throw(_("Dependent Task {0} is not a Template Task").format(dependent_task_format))
def update_depends_on(self):
depends_on_tasks = self.depends_on_tasks or ""
for d in self.depends_on:
if d.task and d.task not in depends_on_tasks:
depends_on_tasks += d.task + ","
self.depends_on_tasks = depends_on_tasks
def update_nsm_model(self):
frappe.utils.nestedset.update_nsm(self)
def on_update(self):
self.update_nsm_model()
self.check_recursion()
self.reschedule_dependent_tasks()
self.update_project()
self.unassign_todo()
self.populate_depends_on()
def unassign_todo(self):
if self.status == "Completed":
close_all_assignments(self.doctype, self.name)
if self.status == "Cancelled":
clear(self.doctype, self.name)
def update_total_expense_claim(self):
self.total_expense_claim = frappe.db.sql("""select sum(total_sanctioned_amount) from `tabExpense Claim`
where project = %s and task = %s and docstatus=1""",(self.project, self.name))[0][0]
def update_time_and_costing(self):
tl = frappe.db.sql("""select min(from_time) as start_date, max(to_time) as end_date,
sum(billing_amount) as total_billing_amount, sum(costing_amount) as total_costing_amount,
sum(hours) as time from `tabTimesheet Detail` where task = %s and docstatus=1"""
,self.name, as_dict=1)[0]
if self.status == "Open":
self.status = "Working"
self.total_costing_amount= tl.total_costing_amount
self.total_billing_amount= tl.total_billing_amount
self.actual_time= tl.time
self.act_start_date= tl.start_date
self.act_end_date= tl.end_date
def update_project(self):
if self.project and not self.flags.from_project:
frappe.get_cached_doc("Project", self.project).update_project()
def check_recursion(self):
if self.flags.ignore_recursion_check: return
check_list = [['task', 'parent'], ['parent', 'task']]
for d in check_list:
task_list, count = [self.name], 0
while (len(task_list) > count ):
tasks = frappe.db.sql(" select %s from `tabTask Depends On` where %s = %s " %
(d[0], d[1], '%s'), cstr(task_list[count]))
count = count + 1
for b in tasks:
if b[0] == self.name:
frappe.throw(_("Circular Reference Error"), CircularReferenceError)
if b[0]:
task_list.append(b[0])
if count == 15:
break
def reschedule_dependent_tasks(self):
end_date = self.exp_end_date or self.act_end_date
if end_date:
for task_name in frappe.db.sql("""
select name from `tabTask` as parent
where parent.project = %(project)s
and parent.name in (
select parent from `tabTask Depends On` as child
where child.task = %(task)s and child.project = %(project)s)
""", {'project': self.project, 'task':self.name }, as_dict=1):
task = frappe.get_doc("Task", task_name.name)
if task.exp_start_date and task.exp_end_date and task.exp_start_date < getdate(end_date) and task.status == "Open":
task_duration = date_diff(task.exp_end_date, task.exp_start_date)
task.exp_start_date = add_days(end_date, 1)
task.exp_end_date = add_days(task.exp_start_date, task_duration)
task.flags.ignore_recursion_check = True
task.save()
def has_webform_permission(self):
project_user = frappe.db.get_value("Project User", {"parent": self.project, "user":frappe.session.user} , "user")
if project_user:
return True
def populate_depends_on(self):
if self.parent_task:
parent = frappe.get_doc('Task', self.parent_task)
if self.name not in [row.task for row in parent.depends_on]:
parent.append("depends_on", {
"doctype": "Task Depends On",
"task": self.name,
"subject": self.subject
})
parent.save()
def on_trash(self):
if check_if_child_exists(self.name):
throw(_("Child Task exists for this Task. You can not delete this Task."))
self.update_nsm_model()
def after_delete(self):
self.update_project()
def update_status(self):
if self.status not in ('Cancelled', 'Completed') and self.exp_end_date:
from datetime import datetime
if self.exp_end_date < datetime.now().date():
self.db_set('status', 'Overdue', update_modified=False)
self.update_project()
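# Return links to any child tasks of the given task; used to block deletion of tasks that still have children.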
@frappe.whitelist()
def check_if_child_exists(name):
child_tasks = frappe.get_all("Task", filters={"parent_task": name})
child_tasks = [get_link_to_form("Task", task.name) for task in child_tasks]
return child_tasks
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_project(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
meta = frappe.get_meta(doctype)
searchfields = meta.get_search_fields()
search_columns = ", " + ", ".join(searchfields) if searchfields else ''
search_cond = " or " + " or ".join([field + " like %(txt)s" for field in searchfields])
return frappe.db.sql(""" select name {search_columns} from `tabProject`
where %(key)s like %(txt)s
%(mcond)s
{search_condition}
order by name
limit %(start)s, %(page_len)s""".format(search_columns = search_columns,
search_condition=search_cond), {
'key': searchfield,
'txt': '%' + txt + '%',
'mcond':get_match_cond(doctype),
'start': start,
'page_len': page_len
})
@frappe.whitelist()
def set_multiple_status(names, status):
names = json.loads(names)
for name in names:
task = frappe.get_doc("Task", name)
task.status = status
task.save()
def set_tasks_as_overdue():
tasks = frappe.get_all("Task", filters={"status": ["not in", ["Cancelled", "Completed"]]}, fields=["name", "status", "review_date"])
for task in tasks:
if task.status == "Pending Review":
if getdate(task.review_date) > getdate(today()):
continue
frappe.get_doc("Task", task.name).update_status()
@frappe.whitelist()
def make_timesheet(source_name, target_doc=None, ignore_permissions=False):
def set_missing_values(source, target):
target.append("time_logs", {
"hours": source.actual_time,
"completed": source.status == "Completed",
"project": source.project,
"task": source.name
})
doclist = get_mapped_doc("Task", source_name, {
"Task": {
"doctype": "Timesheet"
}
}, target_doc, postprocess=set_missing_values, ignore_permissions=ignore_permissions)
return doclist
@frappe.whitelist()
def get_children(doctype, parent, task=None, project=None, is_root=False):
filters = [['docstatus', '<', '2']]
if task:
filters.append(['parent_task', '=', task])
elif parent and not is_root:
# via expand child
filters.append(['parent_task', '=', parent])
else:
filters.append(['ifnull(`parent_task`, "")', '=', ''])
if project:
filters.append(['project', '=', project])
tasks = frappe.get_list(doctype, fields=[
'name as value',
'subject as title',
'is_group as expandable'
], filters=filters, order_by='name')
# return tasks
return tasks
@frappe.whitelist()
def add_node():
from frappe.desk.treeview import make_tree_args
args = frappe.form_dict
args.update({
"name_field": "subject"
})
args = make_tree_args(**args)
if args.parent_task == 'All Tasks' or args.parent_task == args.project:
args.parent_task = None
frappe.get_doc(args).insert()
@frappe.whitelist()
def add_multiple_tasks(data, parent):
data = json.loads(data)
new_doc = {'doctype': 'Task', 'parent_task': parent if parent!="All Tasks" else ""}
new_doc['project'] = frappe.db.get_value('Task', {"name": parent}, 'project') or ""
for d in data:
if not d.get("subject"): continue
new_doc['subject'] = d.get("subject")
new_task = frappe.get_doc(new_doc)
new_task.insert()
def on_doctype_update():
frappe.db.add_index("Task", ["lft", "rgt"])
def validate_project_dates(project_end_date, task, task_start, task_end, actual_or_expected_date):
if task.get(task_start) and date_diff(project_end_date, getdate(task.get(task_start))) < 0:
frappe.throw(_("Task's {0} Start Date cannot be after Project's End Date.").format(actual_or_expected_date))
if task.get(task_end) and date_diff(project_end_date, getdate(task.get(task_end))) < 0:
frappe.throw(_("Task's {0} End Date cannot be after Project's End Date.").format(actual_or_expected_date))
| avg_line_length: 35.495627 | max_line_length: 158 | alphanum_fraction: 0.723121 |
hexsha: fbb8bff412879c88f8451739a8706f331d1a97d8 | size: 4125 | ext: py | lang: Python
max_stars_repo: lldb/packages/Python/lldbsuite/test/python_api/lldbutil/iter/TestRegistersIterator.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | ["Apache-2.0"] | max_stars_count: 765 | 2015-12-03T16:44:59.000Z | 2022-03-07T12:41:10.000Z
max_issues_repo: lldb/packages/Python/lldbsuite/test/python_api/lldbutil/iter/TestRegistersIterator.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | ["Apache-2.0"] | max_issues_count: 1815 | 2015-12-11T23:56:05.000Z | 2020-01-10T19:28:43.000Z
max_forks_repo: lldb/packages/Python/lldbsuite/test/python_api/lldbutil/iter/TestRegistersIterator.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | ["Apache-2.0"] | max_forks_count: 284 | 2015-12-03T16:47:25.000Z | 2022-03-12T05:39:48.000Z
content:
"""
Test the iteration protocol for frame registers.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class RegistersIteratorTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line1 = line_number(
'main.cpp', '// Set break point at this line.')
@add_test_categories(['pyapi'])
def test_iter_registers(self):
"""Test iterator works correctly for lldbutil.iter_registers()."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.cpp", self.line1)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
if not process:
self.fail("SBTarget.LaunchProcess() failed")
import lldbsuite.test.lldbutil as lldbutil
for thread in process:
if thread.GetStopReason() == lldb.eStopReasonBreakpoint:
for frame in thread:
# Dump the registers of this frame using
# lldbutil.get_GPRs() and friends.
if self.TraceOn():
print(frame)
REGs = lldbutil.get_GPRs(frame)
num = len(REGs)
if self.TraceOn():
print(
"\nNumber of general purpose registers: %d" %
num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print("%s => %s" % (reg.GetName(), reg.GetValue()))
REGs = lldbutil.get_FPRs(frame)
num = len(REGs)
if self.TraceOn():
print("\nNumber of floating point registers: %d" % num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print("%s => %s" % (reg.GetName(), reg.GetValue()))
REGs = lldbutil.get_ESRs(frame)
if self.platformIsDarwin():
if self.getArchitecture() != 'armv7' and self.getArchitecture() != 'armv7k':
num = len(REGs)
if self.TraceOn():
print(
"\nNumber of exception state registers: %d" %
num)
for reg in REGs:
self.assertTrue(reg)
if self.TraceOn():
print(
"%s => %s" %
(reg.GetName(), reg.GetValue()))
else:
self.assertIsNone(REGs)
# And these should also work.
for kind in ["General Purpose Registers",
"Floating Point Registers"]:
REGs = lldbutil.get_registers(frame, kind)
self.assertTrue(REGs)
REGs = lldbutil.get_registers(
frame, "Exception State Registers")
if self.platformIsDarwin():
if self.getArchitecture() != 'armv7' and self.getArchitecture() != 'armv7k':
self.assertIsNotNone(REGs)
else:
self.assertIsNone(REGs)
# We've finished dumping the registers for frame #0.
break
| avg_line_length: 38.915094 | max_line_length: 100 | alphanum_fraction: 0.473697 |
hexsha: 0d0492bf1357d4e1926042a04ca1eed241de2f18 | size: 4921 | ext: py | lang: Python
max_stars_repo: tests/unit/sagemaker/model/test_neo.py | aws-patlin/sagemaker-python-sdk | 18af12beffed82aaf263e9cfec8832f39b6bc63f | ["Apache-2.0"] | max_stars_count: 1 | 2020-11-20T14:48:24.000Z | 2020-11-20T14:48:24.000Z
max_issues_repo: tests/unit/sagemaker/model/test_neo.py | aws-patlin/sagemaker-python-sdk | 18af12beffed82aaf263e9cfec8832f39b6bc63f | ["Apache-2.0"] | max_issues_count: null | null | null
max_forks_repo: tests/unit/sagemaker/model/test_neo.py | aws-patlin/sagemaker-python-sdk | 18af12beffed82aaf263e9cfec8832f39b6bc63f | ["Apache-2.0"] | max_forks_count: 1 | 2020-04-30T07:43:57.000Z | 2020-04-30T07:43:57.000Z
content:
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import boto3
import pytest
from mock import Mock, patch
from sagemaker.model import Model
from tests.unit import NEO_REGION_LIST
MODEL_DATA = "s3://bucket/model.tar.gz"
MODEL_IMAGE = "mi"
REGION = "us-west-2"
NEO_REGION_ACCOUNT = "301217895009"
DESCRIBE_COMPILATION_JOB_RESPONSE = {
"CompilationJobStatus": "Completed",
"ModelArtifacts": {"S3ModelArtifacts": "s3://output-path/model.tar.gz"},
}
@pytest.fixture
def sagemaker_session():
return Mock(boto_region_name=REGION)
def _create_model(sagemaker_session=None):
return Model(MODEL_DATA, MODEL_IMAGE, sagemaker_session=sagemaker_session)
def test_compile_model_for_inferentia(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
model.compile(
target_instance_family="ml_inf",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tensorflow",
framework_version="1.15.0",
job_name="compile-model",
)
assert (
"{}.dkr.ecr.{}.amazonaws.com/sagemaker-neo-tensorflow:1.15.0-inf-py3".format(
NEO_REGION_ACCOUNT, REGION
)
== model.image
)
assert model._is_compiled_model is True
def test_compile_model_for_edge_device(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
model.compile(
target_instance_family="deeplens",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tensorflow",
job_name="compile-model",
)
assert model._is_compiled_model is False
def test_compile_model_for_edge_device_tflite(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
model.compile(
target_instance_family="deeplens",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tflite",
job_name="tflite-compile-model",
)
assert model._is_compiled_model is False
def test_compile_model_for_cloud(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
model.compile(
target_instance_family="ml_c4",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tensorflow",
job_name="compile-model",
)
assert model._is_compiled_model is True
def test_compile_model_for_cloud_tflite(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
model.compile(
target_instance_family="ml_c4",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tflite",
job_name="tflite-compile-model",
)
assert model._is_compiled_model is True
@patch("sagemaker.session.Session")
def test_compile_creates_session(session):
session.return_value.boto_region_name = REGION
model = _create_model()
model.compile(
target_instance_family="ml_c4",
input_shape={"data": [1, 3, 1024, 1024]},
output_path="s3://output",
role="role",
framework="tensorflow",
job_name="compile-model",
)
assert session.return_value == model.sagemaker_session
def test_check_neo_region(sagemaker_session):
sagemaker_session.wait_for_compilation_job = Mock(
return_value=DESCRIBE_COMPILATION_JOB_RESPONSE
)
model = _create_model(sagemaker_session)
boto_session = boto3.Session()
for partition in boto_session.get_available_partitions():
for region_name in boto_session.get_available_regions("ec2", partition_name=partition):
assert (region_name in NEO_REGION_LIST) is model.check_neo_region(region_name)
| avg_line_length: 31.343949 | max_line_length: 95 | alphanum_fraction: 0.706157 |
hexsha: 2cd93e6b5a302fac84cac92c1a56617e0a0404d7 | size: 10376 | ext: py | lang: Python
max_stars_repo: spark_jobs/top_seller.py | WillianFuks/PySpark-RecSys | 756c8ce420143ac2483d8e6f959df4019a5479ee | ["MIT"] | max_stars_count: 5 | 2019-01-10T16:06:04.000Z | 2020-11-12T01:19:30.000Z
max_issues_repo: spark_jobs/top_seller.py | dutradda/PySpark-RecSys | 756c8ce420143ac2483d8e6f959df4019a5479ee | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: spark_jobs/top_seller.py | dutradda/PySpark-RecSys | 756c8ce420143ac2483d8e6f959df4019a5479ee | ["MIT"] | max_forks_count: 2 | 2019-01-10T16:15:03.000Z | 2020-11-17T11:37:54.000Z
content:
#MIT License
#
#Copyright (c) 2017 Willian Fuks
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
"""
Set of tools to run Marreco's Top Seller algorithm in spark.
"""
import os
import sys
import json
import operator
import math
import random
import argparse
from collections import defaultdict
sys.path.append('..')
from base import MarrecoBase
from py4j.protocol import Py4JJavaError
from pyspark.sql.utils import AnalysisException
from pyspark.sql import SparkSession
from pyspark.sql import types as stypes
class MarrecoTopSellerJob(MarrecoBase):
"""This Class has all methods necessary to build Marreco Neighborhood
against Spark.
:type context: `pyspark.SparkContext`
:param context: context in which Jobs are ran against.
"""
def transform_data(self, sc, args):
"""This method gets datajet files as input and prepare them on a daily
intermediary basis for Marreco's Top Seller algorithm.
:type sc: spark context
:param sc: spark context for running jobs.
        :type args: Namespace
        :param args: namespace carrying the attributes below.
        :type days_init: int
        :param days_init: how many days back in time to start scanning through
                          the files used in the transformation phase.
        :type days_end: int
        :param days_end: how many days back in time to stop scanning (processing
                         runs from ``days_init`` down to ``days_end``).
:type inter_uri: str
:param inter_uri: uri for where to save intermediate results.
:type force: str
:param force: either ``yes``, in which case forces recreation of
files, or ``no``, which in case if files already
exist then does nothing.
:type source_uri: str
:param source_uri: URI from where to read files.
"""
spark = SparkSession(sc)
for day in range(args.days_init, args.days_end - 1, -1):
formatted_day = self.get_formatted_date(day)
source_uri = args.source_uri.format(formatted_day)
inter_uri = args.inter_uri.format(formatted_day)
try:
inter_data = spark.read.json(inter_uri,
schema = self._load_top_seller_schema()).first()
if args.force == 'yes' or not inter_data:
self._process_datajet_day(sc,
source_uri,
inter_uri,
'overwrite')
except (Py4JJavaError, AnalysisException):
self._process_datajet_day(sc, source_uri, inter_uri)
finally:
print('processed data for {} day'.format(day))
def _process_datajet_day(self, sc, uri, inter_uri, mode=None):
"""Gets datajet json like files and transforms them into data like
[(sku, items_sold),...] saving it in the end.
:type sc: spark context
:param sc: context to run spark jobs.
:type uri: str
:param uri: where the files are located.
:type inter_uri: str
:param inter_uri: where intermediate results should be saved.
:type mode: str
:param mode: indicates how data should be saved. If ``None`` then
                     throws an error if the file already exists. If ``overwrite`` then
deletes previous file and saves new one.
"""
sc.textFile(uri) \
.flatMap(lambda x: self._process_json(x)) \
.filter(lambda x: x) \
.reduceByKey(operator.add) \
.toDF(schema=self._load_top_seller_schema()) \
.write.json(inter_uri, compression='gzip', mode=mode)
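    # Rough sketch of the data flow above, with made-up values:
    #   raw json lines --_process_json--> ('sku1', 2), ('sku1', 1), ('sku2', 4), []
    #   filter(truthy)                --> drops the empty results of malformed rows
    #   reduceByKey(operator.add)     --> ('sku1', 3), ('sku2', 4)
    #   toDF + write.json             --> gzip json lines like {"item_key": "sku1", "value": 3}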
def _load_top_seller_schema(self):
"""Loads schema for top seller intermediate data saved like
[sku, items_sold]
:rtype: `pyspark.sql.StructType`
:returns: schema for top selling data
"""
return stypes.StructType(fields=[
stypes.StructField("item_key", stypes.StringType()),
stypes.StructField("value", stypes.IntegerType())])
def build_marreco(self, sc, args):
"""Main method for building Marreco's algorithms and saving results
for later usage.
:type sc: `pyspark.SparkContext`
:param sc: spark context for running jobs.
:type args: Namespace
:param args:
:type days_init: int
:param days_init: which date time that will be used for reading data
with intermediary daily results.
:type days_end: int
:param days_end: until what file to read input data.
:type inter_uri: str
:param inter_uri: URI where intermediary results should be read from
:type source_uri: str
:param source_uri: source from where to read input data
:type force: str
:param force: either ``yes`` in which case replace intermediate files
or ``no`` where nothing is done if file already exists.
:type top_seller_uri: str
:param top_seller_uri: URI for where to save results
"""
spark = SparkSession(sc)
data = sc.emptyRDD()
for day in range(args.days_init, args.days_end - 1, -1):
formatted_day = self.get_formatted_date(day)
inter_uri = self._render_inter_uri(args.inter_uri.format(
formatted_day))
data = data.union(spark.read.json(inter_uri,
schema=self._load_top_seller_schema()).rdd)
data = data.reduceByKey(operator.add) \
.sortBy(lambda x: x[1], False)
self._save_top_seller_matrix(args.top_seller_uri, data)
def _save_top_seller_matrix(self, top_seller_uri, data):
"""Loads top seller schema and saves final results as
        [(item_key, items_sold), (item_key, items_sold), ...]
:type top_seller_uri: str
:param top_seller_uri: uri for where to save the matrix.
:type data: RDD
:param data: RDD with data like [item_key, items_sold]
"""
data.toDF(schema=self._load_top_seller_schema()) \
.write.json(top_seller_uri, compression='gzip', mode='overwrite')
def _render_inter_uri(self, inter_uri, name_pattern='part-*'):
"""Helper function to process inter_uri's for later usage.
:type inter_uri: str
:param inter_uri: URI used for saving intermediate data transformation
results.
:type name_pattern: str
:param name_pattern: pattern used by spark to save multiple files.
:rtype: str
:returns: URI rendered template for retrieving data back to code.
"""
return os.path.join(inter_uri, name_pattern)
@staticmethod
def _process_json(row):
"""Mapper function to extract from each line from datajet file
and return interactions between customers and sold skus.
:type row: str
:param row: json string with datajet data.
:rtype: list
:returns: `yield` on [sku, items_sold]
"""
try:
r = json.loads(row)
if (r['event']['source']['tracker'] == 'fish' and
'local_timestamp' in r['event'] and
r['event']['identifiers']['djUCID']['value'] and
r['event']['type'] == "orderconfirmation"):
for e in list(zip([e['group_id'] for e in
r['event']['details']['products']],
([int(e) for e in
r['event']['details']['quantities']]))):
yield e
except:
yield []
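    # For orientation, a made-up datajet row that would pass the checks above:
    #   {"event": {"source": {"tracker": "fish"}, "local_timestamp": 1,
    #              "identifiers": {"djUCID": {"value": "abc"}},
    #              "type": "orderconfirmation",
    #              "details": {"products": [{"group_id": "sku1"}], "quantities": ["2"]}}}
    # would yield ('sku1', 2); anything malformed falls into the bare except and
    # yields [], which the `.filter(lambda x: x)` step upstream discards.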
@staticmethod
def process_sysargs(args):
parser = argparse.ArgumentParser()
parser.add_argument('--days_init',
dest='days_init',
type=int,
help=("Total amount of days to come back in time "
"from today's date."))
parser.add_argument('--days_end',
dest='days_end',
type=int,
help=("Total amount of days to come back in time "
"from today's date."))
parser.add_argument('--source_uri',
dest='source_uri',
type=str,
help=("URI template from where to read source "
"files from."))
parser.add_argument('--inter_uri',
dest='inter_uri',
type=str,
help=('URI for saving intermediary results.'))
parser.add_argument('--top_seller_uri',
dest='top_seller_uri',
type=str,
help=('URI for saving top_seller results.'))
parser.add_argument('--force',
dest='force',
type=str,
help=('If ``yes`` then replace all files with new ones. '
' If ``no``, then no replacing happens.'))
args = parser.parse_args(args)
return args
| avg_line_length: 36.925267 | max_line_length: 85 | alphanum_fraction: 0.575559 |
hexsha: edd343cebdeb232ef9b1cfeeec5d6ec9c64139af | size: 1303 | ext: py | lang: Python
max_stars_repo: Proyecto/start.py | leynier/IA-Sim-Com | f6e99bb1aa4b02d5d558dc76a9bf802c3761e428 | ["MIT"] | max_stars_count: 2 | 2021-11-20T23:35:20.000Z | 2021-12-10T17:45:56.000Z
max_issues_repo: Proyecto/start.py | arnel-sanchez/IA-Sim-Com | 22023342f20202b260caa759af9cce71d803663e | ["MIT"] | max_issues_count: 1 | 2022-02-11T07:26:54.000Z | 2022-02-11T07:26:54.000Z
max_forks_repo: Proyecto/start.py | leynier/IA-Sim-Com | f6e99bb1aa4b02d5d558dc76a9bf802c3761e428 | ["MIT"] | max_forks_count: 1 | 2022-02-11T07:24:50.000Z | 2022-02-11T07:24:50.000Z
content:
from pynput import keyboard
from os import name, system
from time import sleep
from sys import exit
def main():
print_welcome()
keyboard.Listener(key).run()
def print_welcome():
clear_console()
print("Hola, bienvenido al simulador de Jefe Tecnico de Moto GP")
print("Para Iniciar Nueva Simulacion Presione [N]")
print("Para Salir del Simulador Presione [E]")
def clear_console():
if name == "ce" or name == "nt" or name == "dos":
system("cls")
elif name == "posix":
system("clear")
def key(tecla):
if tecla == keyboard.KeyCode.from_char('n'):
new_simulation()
print_new_simulation()
elif tecla == keyboard.KeyCode.from_char('e'):
exit_()
def new_simulation():
clear_console()
print("Se ha iniciado una nueva simulacion....")
test_simulation()
def test_simulation():
print("\n\nSIMULACION:")
    time = 1 # Time one simulated lap takes
    stop = False # Real-time adjustments
#start(time, stop)
def print_new_simulation():
print("Para Iniciar Nueva Simulacion Presione [N]")
print("Para Salir del Simulador Presione [E]")
def exit_():
clear_console()
print("Simulaciones terminadas")
sleep(3)
exit(0)
if __name__ == '__main__':
main()
| avg_line_length: 21.360656 | max_line_length: 69 | alphanum_fraction: 0.651573 |
hexsha: a62c7ee9a219eee0e31ce5474969cf413e7db5af | size: 5147 | ext: py | lang: Python
max_stars_repo: onlinecourse/migrations/0001_initial.py | Givindu98/Givindu-Final-Cloud-App-With-Database | 81ebaa0735596ed3197806ff04e7eb679e6cb44a | ["Apache-2.0"] | max_stars_count: null | null | null
max_issues_repo: onlinecourse/migrations/0001_initial.py | Givindu98/Givindu-Final-Cloud-App-With-Database | 81ebaa0735596ed3197806ff04e7eb679e6cb44a | ["Apache-2.0"] | max_issues_count: null | null | null
max_forks_repo: onlinecourse/migrations/0001_initial.py | Givindu98/Givindu-Final-Cloud-App-With-Database | 81ebaa0735596ed3197806ff04e7eb679e6cb44a | ["Apache-2.0"] | max_forks_count: null | null | null
content:
# Generated by Django 3.1.3 on 2021-12-17 06:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=1000)),
('is_correct', models.BooleanField(default=0)),
],
),
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='online course', max_length=30)),
('image', models.ImageField(upload_to='course_images/')),
('description', models.CharField(max_length=1000)),
('pub_date', models.DateField(null=True)),
('total_enrollment', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Enrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_enrolled', models.DateField(default=django.utils.timezone.now)),
('mode', models.CharField(choices=[('audit', 'Audit'), ('honor', 'Honor'), ('BETA', 'BETA')], default='audit', max_length=5)),
('rating', models.FloatField(default=5.0)),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.ManyToManyField(to='onlinecourse.Choice')),
('enrollment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.enrollment')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=1000)),
('grade', models.IntegerField(default=0)),
('lesson_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Lesson',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=200)),
('order', models.IntegerField(default=0)),
('content', models.TextField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.course')),
],
),
migrations.CreateModel(
name='Learner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('occupation', models.CharField(choices=[('student', 'Student'), ('developer', 'Developer'), ('data_scientist', 'Data Scientist'), ('dba', 'Database Admin')], default='student', max_length=20)),
('social_link', models.URLField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Instructor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_time', models.BooleanField(default=True)),
('total_learners', models.IntegerField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='course',
name='instructors',
field=models.ManyToManyField(to='onlinecourse.Instructor'),
),
migrations.AddField(
model_name='course',
name='users',
field=models.ManyToManyField(through='onlinecourse.Enrollment', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='choice',
name='question_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='onlinecourse.question'),
),
]
| avg_line_length: 47.220183 | max_line_length: 210 | alphanum_fraction: 0.58539 |
hexsha: 2adabc6d5f5f495577e601fa440aebfc6fc082c9 | size: 426 | ext: py | lang: Python
max_stars_repo: derivest/__init__.py | njwichrowski/pyDERIVEST | dcca2e98080e1b141674d44af7fd5f1d0f4395f0 | ["BSD-2-Clause"] | max_stars_count: null | null | null
max_issues_repo: derivest/__init__.py | njwichrowski/pyDERIVEST | dcca2e98080e1b141674d44af7fd5f1d0f4395f0 | ["BSD-2-Clause"] | max_issues_count: null | null | null
max_forks_repo: derivest/__init__.py | njwichrowski/pyDERIVEST | dcca2e98080e1b141674d44af7fd5f1d0f4395f0 | ["BSD-2-Clause"] | max_forks_count: null | null | null
content:
# -*- coding: utf-8 -*-
__all__ = ["derivest", "directional_diff", "gradest", "hess_diag",
"hessian", "jacobianest", "ensemble", "build_kwargs"]
from .derivest import derivest
from .directional_diff import directional_diff
from .gradest import gradest
from .hess_diag import hess_diag
from .hessian import hessian
from .jacobianest import jacobianest
from .ensemble import ensemble
from .utils import build_kwargs
| avg_line_length: 32.769231 | max_line_length: 66 | alphanum_fraction: 0.760563 |
hexsha: 4473002dc1d1213e1c9b19da8bac47d1812f867d | size: 3576 | ext: py | lang: Python
max_stars_repo: scrapy/tests/test_utils_defer.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | max_stars_count: 1 | 2016-01-01T14:58:12.000Z | 2016-01-01T14:58:12.000Z
max_issues_repo: scrapy/tests/test_utils_defer.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | max_issues_count: 2 | 2021-12-13T20:51:32.000Z | 2022-02-11T03:47:35.000Z
max_forks_repo: scrapy/tests/test_utils_defer.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | ["BSD-3-Clause"] | max_forks_count: null | null | null
content:
from twisted.trial import unittest
from twisted.internet import reactor, defer
from twisted.python.failure import Failure
from scrapy.utils.defer import mustbe_deferred, process_chain, \
process_chain_both, process_parallel, iter_errback
class MustbeDeferredTest(unittest.TestCase):
def test_success_function(self):
steps = []
def _append(v):
steps.append(v)
return steps
dfd = mustbe_deferred(_append, 1)
dfd.addCallback(self.assertEqual, [1,2]) # it is [1] with maybeDeferred
        steps.append(2) # add another value, that should be caught by assertEqual
return dfd
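    # Presumably the point of the [1, 2] expectation above: unlike twisted's
    # maybeDeferred, mustbe_deferred appears to hand the result back on a later
    # reactor turn, so steps.append(2) runs before the assertEqual callback ever
    # sees the list.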
def test_unfired_deferred(self):
steps = []
def _append(v):
steps.append(v)
dfd = defer.Deferred()
reactor.callLater(0, dfd.callback, steps)
return dfd
dfd = mustbe_deferred(_append, 1)
dfd.addCallback(self.assertEqual, [1,2]) # it is [1] with maybeDeferred
        steps.append(2) # add another value, that should be caught by assertEqual
return dfd
def cb1(value, arg1, arg2):
return "(cb1 %s %s %s)" % (value, arg1, arg2)
def cb2(value, arg1, arg2):
return defer.succeed("(cb2 %s %s %s)" % (value, arg1, arg2))
def cb3(value, arg1, arg2):
return "(cb3 %s %s %s)" % (value, arg1, arg2)
def cb_fail(value, arg1, arg2):
return Failure(TypeError())
def eb1(failure, arg1, arg2):
return "(eb1 %s %s %s)" % (failure.value.__class__.__name__, arg1, arg2)
class DeferUtilsTest(unittest.TestCase):
@defer.inlineCallbacks
def test_process_chain(self):
x = yield process_chain([cb1, cb2, cb3], 'res', 'v1', 'v2')
self.assertEqual(x, "(cb3 (cb2 (cb1 res v1 v2) v1 v2) v1 v2)")
gotexc = False
try:
yield process_chain([cb1, cb_fail, cb3], 'res', 'v1', 'v2')
except TypeError, e:
gotexc = True
self.failUnless(gotexc)
@defer.inlineCallbacks
def test_process_chain_both(self):
x = yield process_chain_both([cb_fail, cb2, cb3], [None, eb1, None], 'res', 'v1', 'v2')
self.assertEqual(x, "(cb3 (eb1 TypeError v1 v2) v1 v2)")
fail = Failure(ZeroDivisionError())
x = yield process_chain_both([eb1, cb2, cb3], [eb1, None, None], fail, 'v1', 'v2')
self.assertEqual(x, "(cb3 (cb2 (eb1 ZeroDivisionError v1 v2) v1 v2) v1 v2)")
@defer.inlineCallbacks
def test_process_parallel(self):
x = yield process_parallel([cb1, cb2, cb3], 'res', 'v1', 'v2')
self.assertEqual(x, ['(cb1 res v1 v2)', '(cb2 res v1 v2)', '(cb3 res v1 v2)'])
def test_process_parallel_failure(self):
d = process_parallel([cb1, cb_fail, cb3], 'res', 'v1', 'v2')
self.failUnlessFailure(d, TypeError)
self.flushLoggedErrors()
return d
class IterErrbackTest(unittest.TestCase):
def test_iter_errback_good(self):
def itergood():
for x in xrange(10):
yield x
errors = []
out = list(iter_errback(itergood(), errors.append))
self.failUnlessEqual(out, range(10))
self.failIf(errors)
def test_iter_errback_bad(self):
def iterbad():
for x in xrange(10):
if x == 5:
a = 1/0
yield x
errors = []
out = list(iter_errback(iterbad(), errors.append))
self.failUnlessEqual(out, [0, 1, 2, 3, 4])
self.failUnlessEqual(len(errors), 1)
self.failUnless(isinstance(errors[0].value, ZeroDivisionError))
| avg_line_length: 34.057143 | max_line_length: 95 | alphanum_fraction: 0.608501 |
hexsha: 8489aec84e0d2d0e45728de480daeeeae98a9576 | size: 581 | ext: py | lang: Python
max_stars_repo: tests/virtualenvs/pickle_env.py | Hernrup/pipdeptree | 7c90d2b76467eda76122b40a1fe736758f346c92 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: tests/virtualenvs/pickle_env.py | Hernrup/pipdeptree | 7c90d2b76467eda76122b40a1fe736758f346c92 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: tests/virtualenvs/pickle_env.py | Hernrup/pipdeptree | 7c90d2b76467eda76122b40a1fe736758f346c92 | ["MIT"] | max_forks_count: null | null | null
content:
#!/usr/bin/env python
# This is a small tool to create a pickle file for a set of packages for the
# purposes of writing tests
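# Assumed usage (hypothetical invocation, shown for clarity): run this inside the
# virtualenv whose packages should be captured and redirect stdout to a file, e.g.
#   python pickle_env.py > pickled_pkgs.pickle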
import pickle
import sys
try:
from pip._internal.utils.misc import get_installed_distributions
except ImportError:
from pip import get_installed_distributions
def main():
default_skip = ['setuptools', 'pip', 'python', 'distribute']
skip = default_skip + ['pipdeptree']
pkgs = get_installed_distributions(local_only=True, skip=skip)
pickle.dump(pkgs, sys.stdout)
return 0
if __name__ == '__main__':
sys.exit(main())
| avg_line_length: 23.24 | max_line_length: 76 | alphanum_fraction: 0.722892 |
hexsha: 155fd7a67f9a9edc0c2bd8f87fae7578bf4259f1 | size: 356 | ext: py | lang: Python
max_stars_repo: img_to_video.py | lidongyv/Reppoint-Tracking | 81b81e921f6b905e68aba117ffc4fca8ffcfcfd6 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: img_to_video.py | lidongyv/Reppoint-Tracking | 81b81e921f6b905e68aba117ffc4fca8ffcfcfd6 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: img_to_video.py | lidongyv/Reppoint-Tracking | 81b81e921f6b905e68aba117ffc4fca8ffcfcfd6 | ["MIT"] | max_forks_count: null | null | null
content:
import ffmpeg
import os
out_path='/home/ld/RepPoints/final/epoch13 thres0.3/vis/vis/'
video_name=os.listdir(out_path)
for i in range(len(video_name)):
video_path=os.path.join(out_path,video_name[i])
(
ffmpeg
.input(os.path.join(video_path,'*.jpg'), pattern_type='glob', framerate=10)
.output(os.path.join(out_path,video_name[i]+'.mp4'))
.run()
)
| avg_line_length: 29.666667 | max_line_length: 77 | alphanum_fraction: 0.730337 |
hexsha: 3e42af30a532d4ca6224467f86bb8b3b53455f9e | size: 29859 | ext: py | lang: Python
max_stars_repo: test/functional/wallet_bumpfee.py | SpaceXpanse/xaya | d106801eb4a86f6d7153ea21e7b49807ecf85091 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: test/functional/wallet_bumpfee.py | SpaceXpanse/xaya | d106801eb4a86f6d7153ea21e7b49807ecf85091 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: test/functional/wallet_bumpfee.py | SpaceXpanse/xaya | d106801eb4a86f6d7153ea21e7b49807ecf85091 | ["MIT"] | max_forks_count: null | null | null
content:
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from decimal import Decimal
from test_framework.blocktools import (
COINBASE_MATURITY,
add_witness_commitment,
create_block,
create_coinbase,
send_to_witness,
)
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
tx_from_hex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
# Fee rates (sat/vB)
INSUFFICIENT = 1
ECONOMICAL = 150
NORMAL = 250
HIGH = 500
TOO_HIGH = 100000
class BumpFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[
"-walletrbf={}".format(i),
"-mintxfee=0.00002",
"-addresstype=bech32",
] for i in range(self.num_nodes)]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def clear_mempool(self):
# Clear mempool between subtests. The subtests may only depend on chainstate (utxos)
self.nodes[1].generate(1)
self.sync_all()
def run_test(self):
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for _ in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
for mode in ["default", "fee_rate"]:
test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address)
self.test_invalid_parameters(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
test_nonrbf_bumpfee_fails(self, peer_node, dest_address)
test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address)
test_dust_to_fee(self, rbf_node, dest_address)
test_watchonly_psbt(self, peer_node, rbf_node, dest_address)
test_rebumping(self, rbf_node, dest_address)
test_rebumping_not_replaceable(self, rbf_node, dest_address)
test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address)
test_bumpfee_metadata(self, rbf_node, dest_address)
test_locked_wallet_fails(self, rbf_node, dest_address)
test_change_script_match(self, rbf_node, dest_address)
test_settxfee(self, rbf_node, dest_address)
test_maxtxfee_fails(self, rbf_node, dest_address)
# These tests wipe out a number of utxos that are expected in other tests
test_small_output_with_feerate_succeeds(self, rbf_node, dest_address)
test_no_more_inputs_fails(self, rbf_node, dest_address)
def test_invalid_parameters(self, rbf_node, peer_node, dest_address):
self.log.info('Test invalid parameters')
rbfid = spend_one_input(rbf_node, dest_address)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-3, "Unexpected key {}".format(key), rbf_node.bumpfee, rbfid, {key: NORMAL})
# Bumping to just above minrelay should fail to increase the total fee enough.
assert_raises_rpc_error(-8, "Insufficient total fee 0.00000141", rbf_node.bumpfee, rbfid, {"fee_rate": INSUFFICIENT})
self.log.info("Test invalid fee rate settings")
assert_raises_rpc_error(-4, "Specified or calculated fee 0.141 is too high (cannot be higher than -maxtxfee 0.10",
rbf_node.bumpfee, rbfid, {"fee_rate": TOO_HIGH})
# Test fee_rate with zero values.
msg = "Insufficient total fee 0.00"
for zero_value in [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]:
assert_raises_rpc_error(-8, msg, rbf_node.bumpfee, rbfid, {"fee_rate": zero_value})
msg = "Invalid amount"
# Test fee_rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, msg, rbf_node.bumpfee, rbfid, {"fee_rate": invalid_value})
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, msg, rbf_node.bumpfee, rbfid, {"fee_rate": invalid_value})
# Test fee_rate out of range (negative number).
assert_raises_rpc_error(-3, "Amount out of range", rbf_node.bumpfee, rbfid, {"fee_rate": -1})
# Test type error.
for value in [{"foo": "bar"}, True]:
assert_raises_rpc_error(-3, "Amount is not a number or string", rbf_node.bumpfee, rbfid, {"fee_rate": value})
self.log.info("Test explicit fee rate raises RPC error if both fee_rate and conf_target are passed")
assert_raises_rpc_error(-8, "Cannot specify both conf_target and fee_rate. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.",
rbf_node.bumpfee, rbfid, {"conf_target": NORMAL, "fee_rate": NORMAL})
self.log.info("Test explicit fee rate raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
rbf_node.bumpfee, rbfid, {"estimate_mode": "economical", "fee_rate": NORMAL})
self.log.info("Test invalid conf_target settings")
assert_raises_rpc_error(-8, "confTarget and conf_target options should not both be set",
rbf_node.bumpfee, rbfid, {"confTarget": 123, "conf_target": 456})
self.log.info("Test invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
rbf_node.bumpfee, rbfid, {"estimate_mode": v})
for mode in ["foo", Decimal("3.1415"), "sat/B", "ROD/kB"]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
rbf_node.bumpfee, rbfid, {"estimate_mode": mode})
self.clear_mempool()
def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
self.log.info('Test simple bumpfee: {}'.format(mode))
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
if mode == "fee_rate":
bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
else:
bumped_psbt = rbf_node.psbtbumpfee(rbfid)
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] > -rbftx["fee"]
assert_equal(bumped_tx["origfee"], -rbftx["fee"])
assert "psbt" not in bumped_tx
assert_equal(bumped_psbt["errors"], [])
assert bumped_psbt["fee"] > -rbftx["fee"]
assert_equal(bumped_psbt["origfee"], -rbftx["fee"])
assert "psbt" in bumped_psbt
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
self.sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
self.clear_mempool()
def test_segwit_bumpfee_succeeds(self, rbf_node, dest_address):
self.log.info('Test that segwit-sourcing bumpfee works')
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.getaddressinfo(rbf_node.getnewaddress(address_type='bech32'))
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransactionwithwallet(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
self.clear_mempool()
def test_nonrbf_bumpfee_fails(self, peer_node, dest_address):
self.log.info('Test that we cannot replace a non RBF transaction')
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_rpc_error(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
self.clear_mempool()
def test_notmine_bumpfee_fails(self, rbf_node, peer_node, dest_address):
self.log.info('Test that it cannot bump fee if non-owned inputs are included')
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
fee = Decimal("0.001")
utxos = [node.listunspent(query_options={'minimumAmount': fee})[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - fee
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransactionwithwallet(rawtx)
signedtx = peer_node.signrawtransactionwithwallet(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_rpc_error(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
self.clear_mempool()
def test_bumpfee_with_descendant_fails(self, rbf_node, rbf_node_address, dest_address):
self.log.info('Test that fee cannot be bumped when it has descendant')
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransactionwithwallet(tx)
rbf_node.sendrawtransaction(tx["hex"])
assert_raises_rpc_error(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
self.clear_mempool()
def test_small_output_with_feerate_succeeds(self, rbf_node, dest_address):
self.log.info('Testing small output with feerate bump succeeds')
# Make sure additional inputs exist
rbf_node.generatetoaddress(COINBASE_MATURITY + 1, rbf_node.getnewaddress())
rbfid = spend_one_input(rbf_node, dest_address)
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_equal(len(input_list), 1)
original_txin = input_list[0]
self.log.info('Keep bumping until transaction fee out-spends non-destination value')
tx_fee = 0
while True:
input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
new_item = list(input_list)[0]
assert_equal(len(input_list), 1)
assert_equal(original_txin["txid"], new_item["txid"])
assert_equal(original_txin["vout"], new_item["vout"])
rbfid_new_details = rbf_node.bumpfee(rbfid)
rbfid_new = rbfid_new_details["txid"]
raw_pool = rbf_node.getrawmempool()
assert rbfid not in raw_pool
assert rbfid_new in raw_pool
rbfid = rbfid_new
tx_fee = rbfid_new_details["fee"]
# Total value from input not going to destination
if tx_fee > Decimal('0.00050000'):
break
# input(s) have been added
final_input_list = rbf_node.getrawtransaction(rbfid, 1)["vin"]
assert_greater_than(len(final_input_list), 1)
# Original input is in final set
assert [txin for txin in final_input_list
if txin["txid"] == original_txin["txid"]
and txin["vout"] == original_txin["vout"]]
rbf_node.generatetoaddress(1, rbf_node.getnewaddress())
assert_equal(rbf_node.gettransaction(rbfid)["confirmations"], 1)
self.clear_mempool()
def test_dust_to_fee(self, rbf_node, dest_address):
self.log.info('Test that bumped output that is dust is dropped to fee')
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
# The DER formatting used by Bitcoin to serialize ECDSA signatures means that signatures can have a
# variable size of 70-72 bytes (or possibly even less), with most being 71 or 72 bytes. The signature
# in the witness is divided by 4 for the vsize, so this variance can take the weight across a 4-byte
# boundary. Thus expected transaction size (p2wpkh, 1 input, 2 outputs) is 140-141 vbytes, usually 141.
if not 140 <= fulltx["vsize"] <= 141:
raise AssertionError("Invalid tx vsize of {} (140-141 expected), full tx: {}".format(fulltx["vsize"], fulltx))
# Bump with fee_rate of 350.25 sat/vB vbytes to create dust.
# Expected fee is 141 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049385 BTC.
# or occasionally 140 vbytes * fee_rate 0.00350250 BTC / 1000 vbytes = 0.00049035 BTC.
# Dust should be dropped to the fee, so actual bump fee is 0.00050000 BTC.
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": 350.25})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) # change output is eliminated
assert_equal(full_bumped_tx["vout"][0]['value'], Decimal("0.00050000"))
self.clear_mempool()
def test_settxfee(self, rbf_node, dest_address):
self.log.info('Test settxfee')
assert_raises_rpc_error(-8, "txfee cannot be less than min relay tx fee", rbf_node.settxfee, Decimal('0.000005'))
assert_raises_rpc_error(-8, "txfee cannot be less than wallet min fee", rbf_node.settxfee, Decimal('0.000015'))
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00250000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["vsize"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
# check that settxfee respects -maxtxfee
self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
assert_raises_rpc_error(-8, "txfee cannot be more than wallet max tx fee", rbf_node.settxfee, Decimal('0.00003'))
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.connect_nodes(1, 0)
self.clear_mempool()
def test_maxtxfee_fails(self, rbf_node, dest_address):
self.log.info('Test that bumpfee fails when it hits -maxtxfee')
# size of bumped transaction (p2wpkh, 1 input, 2 outputs): 141 vbytes
# expected bump fee of 141 vbytes * 0.00200000 BTC / 1000 vbytes = 0.00002820 BTC
# which exceeds maxtxfee and is expected to raise
self.restart_node(1, ['-maxtxfee=0.000025'] + self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_rpc_error(-4, "Unable to create transaction. Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)", rbf_node.bumpfee, rbfid)
self.restart_node(1, self.extra_args[1])
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.connect_nodes(1, 0)
self.clear_mempool()
def test_watchonly_psbt(self, peer_node, rbf_node, dest_address):
self.log.info('Test that PSBT is returned for bumpfee in watchonly wallets')
priv_rec_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0"
pub_rec_desc = rbf_node.getdescriptorinfo(priv_rec_desc)["descriptor"]
priv_change_desc = "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh"
pub_change_desc = rbf_node.getdescriptorinfo(priv_change_desc)["descriptor"]
# Create a wallet with private keys that can sign PSBTs
rbf_node.createwallet(wallet_name="signer", disable_private_keys=False, blank=True)
signer = rbf_node.get_wallet_rpc("signer")
assert signer.getwalletinfo()['private_keys_enabled']
reqs = [{
"desc": priv_rec_desc,
"timestamp": 0,
"range": [0,1],
"internal": False,
"keypool": False # Keys can only be imported to the keypool when private keys are disabled
},
{
"desc": priv_change_desc,
"timestamp": 0,
"range": [0, 0],
"internal": True,
"keypool": False
}]
if self.options.descriptors:
result = signer.importdescriptors(reqs)
else:
result = signer.importmulti(reqs)
assert_equal(result, [{'success': True}, {'success': True}])
# Create another wallet with just the public keys, which creates PSBTs
rbf_node.createwallet(wallet_name="watcher", disable_private_keys=True, blank=True)
watcher = rbf_node.get_wallet_rpc("watcher")
assert not watcher.getwalletinfo()['private_keys_enabled']
reqs = [{
"desc": pub_rec_desc,
"timestamp": 0,
"range": [0, 10],
"internal": False,
"keypool": True,
"watchonly": True,
"active": True,
}, {
"desc": pub_change_desc,
"timestamp": 0,
"range": [0, 10],
"internal": True,
"keypool": True,
"watchonly": True,
"active": True,
}]
if self.options.descriptors:
result = watcher.importdescriptors(reqs)
else:
result = watcher.importmulti(reqs)
assert_equal(result, [{'success': True}, {'success': True}])
funding_address1 = watcher.getnewaddress(address_type='bech32')
funding_address2 = watcher.getnewaddress(address_type='bech32')
peer_node.sendmany("", {funding_address1: 0.001, funding_address2: 0.001})
peer_node.generate(1)
self.sync_all()
# Create single-input PSBT for transaction to be bumped
psbt = watcher.walletcreatefundedpsbt([], {dest_address: 0.0005}, 0, {"fee_rate": 1}, True)['psbt']
psbt_signed = signer.walletprocesspsbt(psbt=psbt, sign=True, sighashtype="ALL", bip32derivs=True)
psbt_final = watcher.finalizepsbt(psbt_signed["psbt"])
original_txid = watcher.sendrawtransaction(psbt_final["hex"])
assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1)
# bumpfee can't be used on watchonly wallets
assert_raises_rpc_error(-4, "bumpfee is not available with wallets that have private keys disabled. Use psbtbumpfee instead.", watcher.bumpfee, original_txid)
# Bump fee, obnoxiously high to add additional watchonly input
bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH})
assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1)
assert "txid" not in bumped_psbt
assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"])
assert not watcher.finalizepsbt(bumped_psbt["psbt"])["complete"]
# Sign bumped transaction
bumped_psbt_signed = signer.walletprocesspsbt(psbt=bumped_psbt["psbt"], sign=True, sighashtype="ALL", bip32derivs=True)
bumped_psbt_final = watcher.finalizepsbt(bumped_psbt_signed["psbt"])
assert bumped_psbt_final["complete"]
# Broadcast bumped transaction
bumped_txid = watcher.sendrawtransaction(bumped_psbt_final["hex"])
assert bumped_txid in rbf_node.getrawmempool()
assert original_txid not in rbf_node.getrawmempool()
rbf_node.unloadwallet("watcher")
rbf_node.unloadwallet("signer")
self.clear_mempool()
def test_rebumping(self, rbf_node, dest_address):
self.log.info('Test that re-bumping the original tx fails, but bumping successor works')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_raises_rpc_error(-4, "already bumped", rbf_node.bumpfee, rbfid, {"fee_rate": NORMAL})
rbf_node.bumpfee(bumped["txid"], {"fee_rate": NORMAL})
self.clear_mempool()
def test_rebumping_not_replaceable(self, rbf_node, dest_address):
self.log.info('Test that re-bumping non-replaceable fails')
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL, "replaceable": False})
assert_raises_rpc_error(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"fee_rate": NORMAL})
self.clear_mempool()
def test_unconfirmed_not_spendable(self, rbf_node, rbf_node_address):
self.log.info('Test that unconfirmed outputs from bumped txns are not spendable')
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then invalidate the block so the rbf tx will be put back in the mempool.
# This makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
# Can not abandon conflicted tx
assert_raises_rpc_error(-5, 'Transaction not eligible for abandonment', lambda: rbf_node.abandontransaction(txid=bumpid))
rbf_node.invalidateblock(block.hash)
# Call abandon to make sure the wallet doesn't attempt to resubmit
# the bump tx and hope the wallet does not rebroadcast before we call.
rbf_node.abandontransaction(bumpid)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
self.clear_mempool()
def test_bumpfee_metadata(self, rbf_node, dest_address):
self.log.info('Test that bumped txn metadata persists to new txn record')
assert(rbf_node.getbalance() < 49)
rbf_node.generatetoaddress(101, rbf_node.getnewaddress())
rbfid = rbf_node.sendtoaddress(dest_address, 49, "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
self.clear_mempool()
def test_locked_wallet_fails(self, rbf_node, dest_address):
self.log.info('Test that locked wallet cannot bump txn')
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
rbf_node.walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
self.clear_mempool()
def test_change_script_match(self, rbf_node, dest_address):
self.log.info('Test that the same change addresses is used for the replacement transaction when possible')
def get_change_address(tx):
tx_details = rbf_node.getrawtransaction(tx, 1)
txout_addresses = [txout['scriptPubKey']['address'] for txout in tx_details["vout"]]
return [address for address in txout_addresses if rbf_node.getaddressinfo(address)["ischange"]]
# Check that there is only one change output
rbfid = spend_one_input(rbf_node, dest_address)
change_addresses = get_change_address(rbfid)
assert_equal(len(change_addresses), 1)
# Now find that address in each subsequent tx, and no other change
bumped_total_tx = rbf_node.bumpfee(rbfid, {"fee_rate": ECONOMICAL})
assert_equal(change_addresses, get_change_address(bumped_total_tx['txid']))
bumped_rate_tx = rbf_node.bumpfee(bumped_total_tx["txid"])
assert_equal(change_addresses, get_change_address(bumped_rate_tx['txid']))
self.clear_mempool()
def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
destinations = {dest_address: Decimal("0.00050000")}
if change_size > 0:
destinations[node.getrawchangeaddress()] = change_size
rawtx = node.createrawtransaction([tx_input], destinations)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = tx_from_hex(tx)
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
add_witness_commitment(block)
block.solve()
node.submitblock(block.serialize().hex())
return block
def test_no_more_inputs_fails(self, rbf_node, dest_address):
self.log.info('Test that bumpfee fails when there are no available confirmed outputs')
# feerate rbf requires confirmed outputs when change output doesn't exist or is insufficient
rbf_node.generatetoaddress(1, dest_address)
# spend all funds, no change output
        # In contrast to upstream, we need to do that in multiple transactions
        # for SpaceXpanse; otherwise its lower transaction-size limit would be exceeded.
num_chunks = 10
per_chunk = (rbf_node.getbalance() - 1) / num_chunks
per_chunk = per_chunk.quantize(Decimal('0.00000000'))
for i in range(num_chunks):
rbf_node.sendtoaddress(rbf_node.getnewaddress(), per_chunk)
rbf_node.generate(1)
rbfid = rbf_node.sendtoaddress(rbf_node.getnewaddress(), rbf_node.getbalance(), "", "", True)
assert_raises_rpc_error(-4, "Unable to create transaction. Insufficient funds", rbf_node.bumpfee, rbfid)
self.clear_mempool()
if __name__ == "__main__":
BumpFeeTest().main()
| 48.23748
| 175
| 0.712482
|
9116789f32b578ba52ef0ec55e20145f971d78d2
| 13,644
|
py
|
Python
|
pysnmp/JUNIPER-JS-NAT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/JUNIPER-JS-NAT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/JUNIPER-JS-NAT-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module JUNIPER-JS-NAT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-JS-NAT-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:48:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
InetAddressType, InetAddressIPv4, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddressIPv4", "InetAddress")
jnxJsNAT, = mibBuilder.importSymbols("JUNIPER-JS-SMI", "jnxJsNAT")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Gauge32, Unsigned32, NotificationType, Counter32, Bits, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, MibIdentifier, TimeTicks, Integer32, Counter64, IpAddress, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Gauge32", "Unsigned32", "NotificationType", "Counter32", "Bits", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "MibIdentifier", "TimeTicks", "Integer32", "Counter64", "IpAddress", "ModuleIdentity")
TextualConvention, DisplayString, DateAndTime = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "DateAndTime")
jnxJsNatMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1))
jnxJsNatMIB.setRevisions(('2007-04-13 20:22', '2012-03-01 11:22',))
if mibBuilder.loadTexts: jnxJsNatMIB.setLastUpdated('201203011122Z')
if mibBuilder.loadTexts: jnxJsNatMIB.setOrganization('Juniper Networks, Inc.')
jnxJsNatNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 0))
jnxJsNatObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1))
jnxJsNatTrapVars = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 2))
jnxJsSrcNatNumOfEntries = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsSrcNatNumOfEntries.setStatus('current')
jnxJsSrcNatTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2), )
if mibBuilder.loadTexts: jnxJsSrcNatTable.setStatus('deprecated')
jnxJsSrcNatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1), ).setIndexNames((0, "JUNIPER-JS-NAT-MIB", "jnxJsNatSrcIpPoolName"), (0, "JUNIPER-JS-NAT-MIB", "jnxJsNatSrcGlobalAddr"))
if mibBuilder.loadTexts: jnxJsSrcNatEntry.setStatus('deprecated')
jnxJsNatSrcIpPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: jnxJsNatSrcIpPoolName.setStatus('deprecated')
jnxJsNatSrcGlobalAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 2), InetAddressIPv4())
if mibBuilder.loadTexts: jnxJsNatSrcGlobalAddr.setStatus('deprecated')
jnxJsNatSrcPortPoolType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("withPAT", 1), ("withoutPAT", 2), ("static", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcPortPoolType.setStatus('deprecated')
jnxJsNatSrcNumOfPortInuse = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcNumOfPortInuse.setStatus('deprecated')
jnxJsNatSrcNumOfSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcNumOfSessions.setStatus('deprecated')
jnxJsNatSrcAssocatedIf = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 2, 1, 6), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcAssocatedIf.setStatus('deprecated')
jnxJsNatIfSrcPoolPortTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3), )
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolPortTable.setStatus('current')
jnxJsNatIfSrcPoolPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1), ).setIndexNames((0, "JUNIPER-JS-NAT-MIB", "jnxJsNatIfSrcPoolIndex"))
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolPortEntry.setStatus('current')
jnxJsNatIfSrcPoolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolIndex.setStatus('current')
jnxJsNatIfSrcPoolTotalSinglePorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolTotalSinglePorts.setStatus('current')
jnxJsNatIfSrcPoolAllocSinglePorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolAllocSinglePorts.setStatus('current')
jnxJsNatIfSrcPoolTotalTwinPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolTotalTwinPorts.setStatus('current')
jnxJsNatIfSrcPoolAllocTwinPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatIfSrcPoolAllocTwinPorts.setStatus('current')
jnxJsSrcNatStatsTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4), )
if mibBuilder.loadTexts: jnxJsSrcNatStatsTable.setStatus('current')
jnxJsSrcNatStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1), ).setIndexNames((0, "JUNIPER-JS-NAT-MIB", "jnxJsNatSrcPoolName"), (0, "JUNIPER-JS-NAT-MIB", "jnxJsNatSrcXlatedAddrType"), (0, "JUNIPER-JS-NAT-MIB", "jnxJsNatSrcXlatedAddr"))
if mibBuilder.loadTexts: jnxJsSrcNatStatsEntry.setStatus('current')
jnxJsNatSrcPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: jnxJsNatSrcPoolName.setStatus('current')
jnxJsNatSrcXlatedAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 2), InetAddressType())
if mibBuilder.loadTexts: jnxJsNatSrcXlatedAddrType.setStatus('current')
jnxJsNatSrcXlatedAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 3), InetAddress())
if mibBuilder.loadTexts: jnxJsNatSrcXlatedAddr.setStatus('current')
jnxJsNatSrcPoolType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("withPAT", 1), ("withoutPAT", 2), ("static", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcPoolType.setStatus('current')
jnxJsNatSrcNumPortInuse = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcNumPortInuse.setStatus('current')
jnxJsNatSrcNumSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 4, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatSrcNumSessions.setStatus('current')
jnxJsNatRuleTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5), )
if mibBuilder.loadTexts: jnxJsNatRuleTable.setStatus('current')
jnxJsNatRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5, 1), ).setIndexNames((0, "JUNIPER-JS-NAT-MIB", "jnxJsNatRuleName"), (0, "JUNIPER-JS-NAT-MIB", "jnxJsNatRuleType"))
if mibBuilder.loadTexts: jnxJsNatRuleEntry.setStatus('current')
jnxJsNatRuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatRuleName.setStatus('current')
jnxJsNatRuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("source", 1), ("destination", 2), ("static", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatRuleType.setStatus('current')
jnxJsNatRuleTransHits = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatRuleTransHits.setStatus('deprecated')
jnxJsNatRuleHits = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 5, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatRuleHits.setStatus('current')
jnxJsNatPoolTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6), )
if mibBuilder.loadTexts: jnxJsNatPoolTable.setStatus('current')
jnxJsNatPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6, 1), ).setIndexNames((0, "JUNIPER-JS-NAT-MIB", "jnxJsNatPoolName"), (0, "JUNIPER-JS-NAT-MIB", "jnxJsNatPoolType"))
if mibBuilder.loadTexts: jnxJsNatPoolEntry.setStatus('current')
jnxJsNatPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatPoolName.setStatus('current')
jnxJsNatPoolType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("source", 1), ("destination", 2), ("static", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatPoolType.setStatus('current')
jnxJsNatPoolTransHits = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatPoolTransHits.setStatus('deprecated')
jnxJsNatPoolHits = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 1, 6, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxJsNatPoolHits.setStatus('current')
jnxJsNatAddrPoolThresholdStatus = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 0, 1)).setObjects(("JUNIPER-JS-NAT-MIB", "jnxJsNatSrcIpPoolName"), ("JUNIPER-JS-NAT-MIB", "jnxJsNatAddrPoolUtil"))
if mibBuilder.loadTexts: jnxJsNatAddrPoolThresholdStatus.setStatus('deprecated')
jnxJsNatAddrPoolUtil = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: jnxJsNatAddrPoolUtil.setStatus('current')
jnxJsNatTrapPoolName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: jnxJsNatTrapPoolName.setStatus('current')
jnxJsSrcNatPoolThresholdStatus = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 39, 1, 7, 1, 0, 2)).setObjects(("JUNIPER-JS-NAT-MIB", "jnxJsNatTrapPoolName"), ("JUNIPER-JS-NAT-MIB", "jnxJsNatAddrPoolUtil"))
if mibBuilder.loadTexts: jnxJsSrcNatPoolThresholdStatus.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-JS-NAT-MIB", jnxJsNatSrcNumSessions=jnxJsNatSrcNumSessions, jnxJsNatSrcGlobalAddr=jnxJsNatSrcGlobalAddr, jnxJsSrcNatTable=jnxJsSrcNatTable, jnxJsSrcNatEntry=jnxJsSrcNatEntry, jnxJsSrcNatStatsEntry=jnxJsSrcNatStatsEntry, jnxJsNatRuleType=jnxJsNatRuleType, jnxJsNatPoolName=jnxJsNatPoolName, jnxJsNatRuleTable=jnxJsNatRuleTable, jnxJsNatPoolEntry=jnxJsNatPoolEntry, jnxJsNatSrcIpPoolName=jnxJsNatSrcIpPoolName, jnxJsNatTrapVars=jnxJsNatTrapVars, jnxJsNatIfSrcPoolIndex=jnxJsNatIfSrcPoolIndex, jnxJsNatSrcPoolName=jnxJsNatSrcPoolName, jnxJsNatIfSrcPoolAllocSinglePorts=jnxJsNatIfSrcPoolAllocSinglePorts, jnxJsNatMIB=jnxJsNatMIB, jnxJsNatSrcNumPortInuse=jnxJsNatSrcNumPortInuse, jnxJsSrcNatPoolThresholdStatus=jnxJsSrcNatPoolThresholdStatus, jnxJsNatNotifications=jnxJsNatNotifications, jnxJsNatIfSrcPoolAllocTwinPorts=jnxJsNatIfSrcPoolAllocTwinPorts, jnxJsNatSrcPoolType=jnxJsNatSrcPoolType, jnxJsNatSrcXlatedAddr=jnxJsNatSrcXlatedAddr, jnxJsNatPoolTable=jnxJsNatPoolTable, jnxJsNatIfSrcPoolTotalSinglePorts=jnxJsNatIfSrcPoolTotalSinglePorts, jnxJsNatSrcPortPoolType=jnxJsNatSrcPortPoolType, jnxJsNatSrcNumOfPortInuse=jnxJsNatSrcNumOfPortInuse, PYSNMP_MODULE_ID=jnxJsNatMIB, jnxJsSrcNatNumOfEntries=jnxJsSrcNatNumOfEntries, jnxJsNatObjects=jnxJsNatObjects, jnxJsNatIfSrcPoolTotalTwinPorts=jnxJsNatIfSrcPoolTotalTwinPorts, jnxJsNatAddrPoolUtil=jnxJsNatAddrPoolUtil, jnxJsNatSrcXlatedAddrType=jnxJsNatSrcXlatedAddrType, jnxJsNatPoolTransHits=jnxJsNatPoolTransHits, jnxJsSrcNatStatsTable=jnxJsSrcNatStatsTable, jnxJsNatAddrPoolThresholdStatus=jnxJsNatAddrPoolThresholdStatus, jnxJsNatRuleEntry=jnxJsNatRuleEntry, jnxJsNatIfSrcPoolPortEntry=jnxJsNatIfSrcPoolPortEntry, jnxJsNatSrcAssocatedIf=jnxJsNatSrcAssocatedIf, jnxJsNatIfSrcPoolPortTable=jnxJsNatIfSrcPoolPortTable, jnxJsNatPoolType=jnxJsNatPoolType, jnxJsNatSrcNumOfSessions=jnxJsNatSrcNumOfSessions, jnxJsNatPoolHits=jnxJsNatPoolHits, jnxJsNatRuleName=jnxJsNatRuleName, jnxJsNatRuleTransHits=jnxJsNatRuleTransHits, jnxJsNatRuleHits=jnxJsNatRuleHits, jnxJsNatTrapPoolName=jnxJsNatTrapPoolName)
| 129.942857
| 2,076
| 0.76202
|
bf0e849cee23096ff15ed13e9a02c99fce8b9ef5
| 20,434
|
py
|
Python
|
train.py
|
NIST-NEON-DSE/deepTEA
|
fb0dfc407c6963ff21daf835bbd43e4c4124f22c
|
[
"Apache-2.0"
] | 2
|
2020-01-17T18:42:06.000Z
|
2020-07-03T04:57:04.000Z
|
train.py
|
NIST-NEON-DSE/deepTEA
|
fb0dfc407c6963ff21daf835bbd43e4c4124f22c
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
NIST-NEON-DSE/deepTEA
|
fb0dfc407c6963ff21daf835bbd43e4c4124f22c
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import functools
import os
import sys
import warnings
#Import logger.
#if __name__ == "__main__":
# from comet_ml import Experiment, predictor
import keras
import keras.preprocessing.image
import tensorflow as tf
import glob
import sys
wdir = os.getcwd()
#print(dir)
sys.path.insert(0, os.path.join(os.path.dirname(wdir), '..', '..'))
import keras_retinanet.bin
__package__ = "keras_retinanet.bin"
# Change these to absolute imports if you copy this script outside the keras_retinanet package.
from .. import layers # noqa: F401
from .. import losses
from .. import models
from ..callbacks import RedirectModel
from ..callbacks.eval import Evaluate
from ..models.retinanet import retinanet_bbox
from ..preprocessing.csv_generator import CSVGenerator
from ..preprocessing.open_images import OpenImagesGenerator
from ..preprocessing.pascal_voc import PascalVocGenerator
from ..utils.anchors import anchor_targets_bbox, make_shapes_callback
from ..utils.config import read_config_file, parse_anchor_parameters
from ..utils.keras_version import check_keras_version
from ..utils.model import freeze as freeze_model
from ..utils.transform import random_transform_generator
from ..utils.image import random_visual_effect_generator
# adjust this to point to your downloaded/trained model
# models can be downloaded here: https://github.com/fizyr/keras-retinanet/releases
model_path = os.path.join('./keras_retinanet/backbones', 'resnet50_coco_best_v2.1.0.h5')
# load retinanet model
model = models.load_model(model_path, backbone_name='resnet50')
def makedirs(path):
# Intended behavior: try to create the directory,
# pass if the directory exists already, fails otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
""" Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def model_with_weights(model, weights, skip_mismatch):
""" Load weights for model.
Args
model : The model to load weights for.
weights : The weights to load.
skip_mismatch : If True, skips layers whose shape of weights doesn't match with the model.
"""
if weights is not None:
model.load_weights(weights, by_name=True, skip_mismatch=skip_mismatch)
return model
def create_models(backbone_retinanet, num_classes, weights, multi_gpu=0, \
freeze_backbone=False, lr=1e-5, config=None, nms_threshold=None, input_channels=3):
""" Creates three models (model, training_model, prediction_model).
Args
backbone_retinanet : A function to call to create a retinanet model with a given backbone.
num_classes : The number of classes to train.
weights : The weights to load into the model.
multi_gpu : The number of GPUs to use for training.
freeze_backbone : If True, disables learning for the backbone.
config : Config parameters, None indicates the default configuration.
Returns
model : The base model. This is also the model that is saved in snapshots.
training_model : The training model. If multi_gpu=0, this is identical to model.
prediction_model : The model wrapped with utility functions to perform object detection (applies regression values and performs NMS).
"""
modifier = freeze_model if freeze_backbone else None
# load anchor parameters, or pass None (so that defaults will be used)
anchor_params = None
num_anchors = None
if config and 'anchor_parameters' in config:
anchor_params = parse_anchor_parameters(config)
num_anchors = anchor_params.num_anchors()
# Keras recommends initialising a multi-gpu model on the CPU to ease weight sharing, and to prevent OOM errors.
# optionally wrap in a parallel model
if multi_gpu > 1:
from keras.utils import multi_gpu_model
with tf.device('/cpu:0'):
model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, \
modifier=modifier, input_channels=input_channels), weights=weights, skip_mismatch=True)
training_model = multi_gpu_model(model, gpus=multi_gpu)
else:
training_model = model = model_with_weights(backbone_retinanet(num_classes, num_anchors=num_anchors, \
modifier=modifier, input_channels=input_channels), weights=weights, skip_mismatch=True)
training_model = model
# make prediction model
print("Making prediction model with nms = %.2f" % nms_threshold )
prediction_model = retinanet_bbox(model=model, nms_threshold=nms_threshold, anchor_params=anchor_params)
# compile model
training_model.compile(
loss={
'regression' : losses.smooth_l1(),
'classification': losses.focal()
},
        optimizer=keras.optimizers.adam(lr=lr, clipnorm=0.001)
)
return model, training_model, prediction_model
def create_callbacks(model, training_model, prediction_model, train_generator, validation_generator, args, experiment = None):
""" Creates the callbacks to use during training.
Args
model: The base model.
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir = args.tensorboard_dir,
histogram_freq = 0,
batch_size = args.batch_size,
write_graph = True,
write_grads = False,
write_images = False,
embeddings_freq = 0,
embeddings_layer_names = None,
embeddings_metadata = None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
evaluation = Evaluate(validation_generator,
#experiment=experiment,
tensorboard=tensorboard_callback,
weighted_average=args.weighted_average)
evaluation = RedirectModel(evaluation, prediction_model)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
'{backbone}_{{epoch:02d}}.h5'.format(backbone=args.backbone)
),
verbose=1,
save_best_only=True,
monitor="mAP",
mode='max'
)
checkpoint = RedirectModel(checkpoint, model)
callbacks.append(checkpoint)
callbacks.append(keras.callbacks.ReduceLROnPlateau(
monitor = 'loss',
factor = 0.1,
patience = 2,
verbose = 1,
mode = 'auto',
min_delta = 0.0001,
cooldown = 0,
min_lr = 0
))
#create the NEON generator
#NEON_generator = create_NEON_generator(args.batch_size, DeepForest_config)
#neon_evaluation = NEONmAP(NEON_generator,
# experiment=experiment,
# save_path=args.save_path,
# score_threshold=args.score_threshold,
# DeepForest_config=DeepForest_config)
#neon_evaluation = RedirectModel(neon_evaluation, prediction_model)
#callbacks.append(neon_evaluation)
#comet_loss predictor
#predictor_callback = predictor.Predictor(experiment, loss_name="loss", patience = 10, best_callback= None, threshold=0.1)
return callbacks
def create_generators(args, preprocess_image):
""" Create generators for training and validation.
Args
args : parseargs object containing configuration for generators.
preprocess_image : Function that preprocesses an image for the network.
"""
common_args = {
'batch_size' : args.batch_size,
'config' : args.config,
'image_min_side' : args.image_min_side,
'image_max_side' : args.image_max_side,
'preprocess_image' : preprocess_image,
}
#TODO: here you want to include Hierarchical Dimensionality Reduction (https://github.com/GatorSense/hsi_toolkit_py/blob/master/dim_reduction/hdr.py)
# create random transform generator for augmenting training data
if args.random_transform:
transform_generator = random_transform_generator(
min_rotation=-0.1,
max_rotation=0.1,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
min_shear=-0.1,
max_shear=0.1,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
flip_y_chance=0.5,
)
visual_effect_generator = random_visual_effect_generator(
contrast_range=(0.9, 1.1),
brightness_range=(-.1, .1),
hue_range=(-0.05, 0.05),
saturation_range=(0.95, 1.05)
)
else:
transform_generator = random_transform_generator(flip_x_chance=0.5)
visual_effect_generator = None
train_generator = CSVGenerator(
args.annotations,
args.classes,
transform_generator=transform_generator,
visual_effect_generator=visual_effect_generator,
**common_args
)
if args.val_annotations:
validation_generator = CSVGenerator(
args.val_annotations,
args.classes,
shuffle_groups=True,
**common_args
)
else:
validation_generator = None
return train_generator, validation_generator
def check_args(parsed_args):
""" Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.multi_gpu > 1 and parsed_args.batch_size < parsed_args.multi_gpu:
raise ValueError(
"Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(parsed_args.batch_size,
parsed_args.multi_gpu))
if parsed_args.multi_gpu > 1 and parsed_args.snapshot:
raise ValueError(
"Multi GPU training ({}) and resuming from snapshots ({}) is not supported.".format(parsed_args.multi_gpu,
parsed_args.snapshot))
if parsed_args.multi_gpu > 1 and not parsed_args.multi_gpu_force:
raise ValueError("Multi-GPU support is experimental, use at own risk! Run with --multi-gpu-force if you wish to continue.")
if 'resnet' not in parsed_args.backbone:
warnings.warn('Using experimental backbone {}. Only resnet50 has been properly tested.'.format(parsed_args.backbone))
return parsed_args
def parse_args(args):
""" Parse the arguments.
"""
def csv_list(string):
return string.split(',')
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
#subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
group = parser.add_mutually_exclusive_group()
group.add_argument('--snapshot', help='Resume training from a snapshot.', default = './snapshots/latest_snapshot.h5', type=str)
group.add_argument('--imagenet-weights', help='Initialize the model with pretrained imagenet weights. This is the default behaviour.', action='store_const', const=True, default=True)
group.add_argument('--weights', help='Initialize the model with weights from a file.', default = './keras_retinanet/backbones/universal_deepLidar.h5', type=str)
group.add_argument('--no-weights', help='Don\'t initialize the model with any weights.', dest='imagenet_weights', action='store_const', const=False)
#not sure if I want them here
parser.add_argument('--annotations', help='Path to CSV file containing annotations for training.', default='./dataset/train.csv', type=str)
parser.add_argument('--classes', help='Path to a CSV file containing class label mapping.', default='./dataset/classes.csv',type=str)
parser.add_argument('--val-annotations', help='Path to CSV file containing annotations for validation (optional).', default='./dataset/test.csv', type=str)
#other args
parser.add_argument('--backbone', help='Backbone model used by retinanet.', default='resnet50', type=str)
parser.add_argument('--batch-size', help='Size of the batches.', default=40, type=int)
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--multi-gpu', help='Number of GPUs to use for parallel processing.', type=int, default=0)
parser.add_argument('--multi-gpu-force', help='Extra flag needed to enable (experimental) multi-gpu support.', action='store_true')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=40)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000) #batch_size
parser.add_argument('--batchsize', help='Batch size.', type=int, default=40) #batch_size
parser.add_argument('--lr', help='Learning rate.', type=float, default=1e-5)
parser.add_argument('--snapshot-path', help='Path to store snapshots of models during training (defaults to \'./snapshots\')', default='./snapshots/')
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output', default='./logs')
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation', action='store_false')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=200)
parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=200)
parser.add_argument('--config', help='Path to a configuration parameters .ini file.')
parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss', action='store_true')
parser.add_argument('--nms_threshold', help='Parameter regulating non-max suppression for overlapping boxes', type=float, default=0.1)
parser.add_argument('--input_channels', help='How many channels in the image?', type=int, default=3)
#Comet ml image viewer
parser.add_argument('--save-path', help='Path for saving eval images with detections (doesn\'t work for COCO).', default="./eval/", type=str)
parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.3).', default=0.05, type=float)
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int, default=10)
return check_args(parser.parse_args(args))
def main(args=None, experiment=None):
# parse arguments
print("parsing arguments")
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create object that stores backbone information
backbone = models.backbone(args.backbone)
# make sure keras is the minimum required version
check_keras_version()
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
keras.backend.tensorflow_backend.set_session(get_session())
# optionally load config parameters
if args.config:
args.config = read_config_file(args.config)
# create the generators
train_generator, validation_generator = create_generators(args, backbone.preprocess_image)
#Log number of trees trained on
if experiment:
experiment.log_parameter("Number of Training Trees", train_generator.total_trees)
# create the model
if args.snapshot is not None:
print('Loading model, this may take a second...')
model = models.load_model(args.snapshot, backbone_name=args.backbone)
training_model = model
anchor_params = None
if args.config and 'anchor_parameters' in args.config:
anchor_params = parse_anchor_parameters(args.config)
#nms_threshold=DeepForest_config["nms_threshold"]
prediction_model = retinanet_bbox(model=model, anchor_params=anchor_params) #nms_threshold=DeepForest_config["nms_threshold"]
else:
weights = args.weights
# default to imagenet if nothing else is specified
if weights is None and args.imagenet_weights:
weights = backbone.download_imagenet()
print('Creating model, this may take a second...')
model, training_model, prediction_model = create_models(
backbone_retinanet=backbone.retinanet,
num_classes=train_generator.num_classes(),
weights=weights,
multi_gpu=args.multi_gpu,
freeze_backbone=args.freeze_backbone,
nms_threshold=args.nms_threshold,
input_channels=args.input_channels,
lr=args.lr,
config=args.config
)
# print model summary
print(model.summary())
# this lets the generator compute backbone layer shapes using the actual backbone model
if 'vgg' in args.backbone or 'densenet' in args.backbone:
compute_anchor_targets = functools.partial(anchor_targets_bbox, shapes_callback=make_shapes_callback(model))
train_generator.compute_anchor_targets = compute_anchor_targets
#train_generator.compute_shapes = make_shapes_callback(model)
if validation_generator:
#validation_generator.compute_shapes = train_generator.compute_shapes
validation_generator.compute_anchor_targets = compute_anchor_targets
# create the callbacks
callbacks = create_callbacks(
model,
training_model,
prediction_model,
train_generator,
validation_generator,
args
)
if not args.compute_val_loss:
validation_generator = None
# start training
history = training_model.fit_generator(
generator=train_generator,
steps_per_epoch=train_generator.size()/args.batch_size,
epochs=args.epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator)
#return path snapshot of final epoch
saved_models = glob.glob(os.path.join(args.snapshot_path,"*.h5"))
saved_models.sort()
#Return model if found
if len(saved_models) > 0:
return saved_models[-1]
if __name__ == '__main__':
output_model = main(args=None)
| 48.307329
| 187
| 0.675639
|
871bede4c9c011418cb4d87c888c72e4ec3fcbf0
| 1,903
|
py
|
Python
|
lab7/project/main.py
|
CaramelIceCream/InformationSecurity
|
c509c40bbb929a7ff05123e486e8f260db327dd2
|
[
"CC-BY-4.0"
] | null | null | null |
lab7/project/main.py
|
CaramelIceCream/InformationSecurity
|
c509c40bbb929a7ff05123e486e8f260db327dd2
|
[
"CC-BY-4.0"
] | null | null | null |
lab7/project/main.py
|
CaramelIceCream/InformationSecurity
|
c509c40bbb929a7ff05123e486e8f260db327dd2
|
[
"CC-BY-4.0"
] | null | null | null |
# Decode a message
def decode(cr_message, key):
message = []
cr_message = cr_message.split()
key = key.split()
for i in range(0, len(cr_message)):
message.append(chr(int(cr_message[i], 16) ^ int(key[i], 16)))
return ''.join(message)
# Encode a message
def encode(message, key):
cr_message = []
key = key.split()
for i in range(0, len(message)):
cr_message.append((hex(ord(message[i]) ^ int(key[i], 16)).lstrip('0x')).upper())
if len(cr_message[i]) == 1:
cr_message[i] = '0' + cr_message[i]
return ' '.join(cr_message)
# Find the key
def get_key(message, cr_message):
cr_message = cr_message.split()
key = []
for i in range(0, len(message)):
key.append((hex(ord(message[i]) ^ int(cr_message[i], 16)).lstrip('0x')).upper())
if len(key[i]) == 1:
key[i] = '0' + key[i]
return ' '.join(key)
# message = 'С Новым Годом, друзья!'  (English: "Happy New Year, friends!")
# cr_message_test = '424 2c 40a 441 43c 405 40b f2 487 42e 43d 410 41e 7b df 4fc 44b 4f1 447 418 487 2a'
# key_test = '05 0C 17 7F 0E 4E 37 D2 94 10 09 2E 22 57 FF C8 0B B2 70 54 C8 0B'
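# A minimal round-trip sketch (the demo key and message below are illustrative,
# not part of the original lab data): encoding with a hex key and decoding with
# the same key returns the original text, and get_key recovers the key from a
# known plaintext/ciphertext pair.
demo_key = '05 0C 17 7F 0E'
demo_message = 'Hello'
assert decode(encode(demo_message, demo_key), demo_key) == demo_message
assert get_key(demo_message, encode(demo_message, demo_key)) == demo_key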
print('Derive the ciphertext from a known key and a known plaintext')
message = input('Enter the message text: ')
key = input('Enter the key: ')
cr_message_test = encode(message, key)
print('Encoded message:', cr_message_test)
print()
print('Derive the key that turns the ciphertext into a given plaintext fragment')
message = input('Enter the message text: ')
cr_message = input('Enter the encoded message text: ')
key = get_key(message, cr_message)
print('Key:', key)
print()
print('Decode a message with a known key')
cr_message = input('Enter the encoded message text: ')
key = input('Enter the key: ')
message = decode(cr_message, key)
print('Decoded message:', message)
| 33.385965
| 106
| 0.669995
|
624e80fc1c9cf63ee33172055c4b6595eb22ccac
| 5,899
|
py
|
Python
|
dataset.py
|
cxqj/46-DeblurGANv2
|
967516534a1d2b833ff9e6558773064fa471353c
|
[
"BSD-3-Clause"
] | null | null | null |
dataset.py
|
cxqj/46-DeblurGANv2
|
967516534a1d2b833ff9e6558773064fa471353c
|
[
"BSD-3-Clause"
] | null | null | null |
dataset.py
|
cxqj/46-DeblurGANv2
|
967516534a1d2b833ff9e6558773064fa471353c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from copy import deepcopy
from functools import partial
from glob import glob
from hashlib import sha1
from typing import Callable, Iterable, Optional, Tuple
import cv2
import numpy as np
from glog import logger
from joblib import Parallel, cpu_count, delayed
from skimage.io import imread
from torch.utils.data import Dataset
from tqdm import tqdm
import aug
# Return the subsampled image pairs
def subsample(data: Iterable, bounds: Tuple[float, float], hash_fn: Callable, n_buckets=100, salt='', verbose=True): # bounds:(0,0.9)
data = list(data) # [(path_a,path_b),(path_a,path_b),....(path_a,path_b)] 2103x2
    # Assign each image pair a pseudo-random bucket index in [0, n_buckets)
buckets = split_into_buckets(data, n_buckets=n_buckets, salt=salt, hash_fn=hash_fn) # (2103,) [46,61,30,35,....,25,96]??
lower_bound, upper_bound = [x * n_buckets for x in bounds] # 0, 90.0
msg = f'Subsampling buckets from {lower_bound} to {upper_bound}, total buckets number is {n_buckets}'
if salt:
msg += f'; salt is {salt}'
if verbose:
logger.info(msg)
return np.array([sample for bucket, sample in zip(buckets, data) if lower_bound <= bucket < upper_bound]) # samples between 0-90
# Derive a hash string from a pair of file paths
def hash_from_paths(x: Tuple[str, str], salt: str = '') -> str:
path_a, path_b = x
names = ''.join(map(os.path.basename, (path_a, path_b))) # 000047.png000047.png
return sha1(f'{names}_{salt}'.encode()).hexdigest()
def split_into_buckets(data: Iterable, n_buckets: int, hash_fn: Callable, salt=''):
hashes = map(partial(hash_fn, salt=salt), data)
return np.array([int(x, 16) % n_buckets for x in hashes])
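# A minimal usage sketch (the paths below are hypothetical, for illustration only):
#   pairs = [('blur/0001.png', 'sharp/0001.png'), ('blur/0002.png', 'sharp/0002.png')]
#   train = subsample(pairs, bounds=(0, 0.9), hash_fn=hash_from_paths, verbose=False)
#   val = subsample(pairs, bounds=(0.9, 1), hash_fn=hash_from_paths, verbose=False)
# Because the bucket index depends only on the file names (plus the salt),
# a given pair always lands in the same split across runs.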
def _read_img(x: str):
img = cv2.imread(x) # (720,1280,3)
if img is None:
logger.warning(f'Can not read image {x} with OpenCV, switching to scikit-image')
img = imread(x)
return img
class PairedDataset(Dataset):
def __init__(self,
files_a: Tuple[str],
files_b: Tuple[str],
transform_fn: Callable,
normalize_fn: Callable,
corrupt_fn: Optional[Callable] = None,
preload: bool = True,
preload_size: Optional[int] = 0,
verbose=True):
assert len(files_a) == len(files_b)
self.preload = preload # False
self.data_a = files_a # list (258,) ['/media/../000047.png',...]
self.data_b = files_b # list (258,) ['/media/../000047.png',...]
self.verbose = verbose # True
self.corrupt_fn = corrupt_fn
self.transform_fn = transform_fn
self.normalize_fn = normalize_fn
logger.info(f'Dataset has been created with {len(self.data_a)} samples')
        if preload:  # whether to preload the image pairs
preload_fn = partial(self._bulk_preload, preload_size=preload_size)
if files_a == files_b:
self.data_a = self.data_b = preload_fn(self.data_a)
else:
self.data_a, self.data_b = map(preload_fn, (self.data_a, self.data_b))
self.preload = True
def _bulk_preload(self, data: Iterable[str], preload_size: int):
jobs = [delayed(self._preload)(x, preload_size=preload_size) for x in data]
jobs = tqdm(jobs, desc='preloading images', disable=not self.verbose)
return Parallel(n_jobs=cpu_count(), backend='threading')(jobs)
@staticmethod
def _preload(x: str, preload_size: int):
img = _read_img(x)
if preload_size:
h, w, *_ = img.shape
h_scale = preload_size / h
w_scale = preload_size / w
scale = max(h_scale, w_scale)
img = cv2.resize(img, fx=scale, fy=scale, dsize=None)
assert min(img.shape[:2]) >= preload_size, f'weird img shape: {img.shape}'
return img
    def _preprocess(self, img, res):  # convert to channel-first layout
def transpose(x):
return np.transpose(x, (2, 0, 1))
return map(transpose, self.normalize_fn(img, res))
def __len__(self):
return len(self.data_a)
def __getitem__(self, idx):
a, b = self.data_a[idx], self.data_b[idx] # a: /media/cxq/Elements/dataset/GOPRO/train/GOPR0372_07_00/blur/000076.png b: /media/cxq/Elements/dataset/GOPRO/train/GOPR0372_07_00/sharp/000076.png
if not self.preload:
a, b = map(_read_img, (a, b)) # (720,1280,3), (720,1280,3)
a, b = self.transform_fn(a, b) # (256,256,3), (256,256,3)
if self.corrupt_fn is not None:
a = self.corrupt_fn(a)
a, b = self._preprocess(a, b) # (3,256,256), (3,256,256)
return {'a': a, 'b': b}
@staticmethod
def from_config(config):
config = deepcopy(config)
        # collect the paths of all blurred and all sharp images
files_a, files_b = map(lambda x: sorted(glob(config[x], recursive=True)), ('files_a', 'files_b'))
        # image augmentation
transform_fn = aug.get_transforms(size=config['size'], scope=config['scope'], crop=config['crop'])
        # normalization
normalize_fn = aug.get_normalize()
        # crop/corrupt function
corrupt_fn = aug.get_corrupt_function(config['corrupt'])
hash_fn = hash_from_paths
# ToDo: add more hash functions
verbose = config.get('verbose', True) # True
data = subsample(data=zip(files_a, files_b),
bounds=config.get('bounds', (0, 1)),
hash_fn=hash_fn,
verbose=verbose) # (1886,2)
files_a, files_b = map(list, zip(*data))
return PairedDataset(files_a=files_a,
files_b=files_b,
preload=config['preload'], # False
preload_size=config['preload_size'], # 0
corrupt_fn=corrupt_fn,
normalize_fn=normalize_fn,
transform_fn=transform_fn,
verbose=verbose)
| 39.858108
| 203
| 0.598576
|
8589e7c85ce275b8a9eebb39b303c738abe26160
| 206
|
py
|
Python
|
webapp/main/tests.py
|
joepetrini/bike-counter
|
e22190d7225ee54e7327efe43861f85c49c0bbd7
|
[
"MIT"
] | 5
|
2015-01-09T00:54:43.000Z
|
2021-06-16T20:46:45.000Z
|
webapp/main/tests.py
|
joepetrini/bike-counter
|
e22190d7225ee54e7327efe43861f85c49c0bbd7
|
[
"MIT"
] | 4
|
2015-06-30T12:04:22.000Z
|
2017-02-08T00:11:19.000Z
|
webapp/main/tests.py
|
joepetrini/bike-counter
|
e22190d7225ee54e7327efe43861f85c49c0bbd7
|
[
"MIT"
] | 2
|
2015-01-07T02:46:27.000Z
|
2015-07-01T19:43:03.000Z
|
from django.test import TestCase
from .models import Value, ValueSet
class ValidateSingleDefaultValue(TestCase):
def setUp(self):
pass
def test_single_default(self):
pass
| 18.727273
| 43
| 0.68932
|
b0124490c838bfb3ada5cada52cced29f07bd0b3
| 3,059
|
py
|
Python
|
VisionTransformersRobustness/VisionTransformersRobustness/TransformerConfigs.py
|
bergermeister/ViTRobust
|
f7ffa59978ad0dd49492b11ee05142b058c7078f
|
[
"BSD-3-Clause"
] | 6
|
2021-05-10T18:00:17.000Z
|
2022-02-25T11:39:33.000Z
|
VisionTransformersRobustness/VisionTransformersRobustness/TransformerConfigs.py
|
bergermeister/ViTRobust
|
f7ffa59978ad0dd49492b11ee05142b058c7078f
|
[
"BSD-3-Clause"
] | 2
|
2021-05-10T06:02:19.000Z
|
2021-05-12T00:22:29.000Z
|
VisionTransformersRobustness/VisionTransformersRobustness/TransformerConfigs.py
|
bergermeister/ViTRobust
|
f7ffa59978ad0dd49492b11ee05142b058c7078f
|
[
"BSD-3-Clause"
] | 3
|
2021-05-13T10:51:04.000Z
|
2021-09-03T07:28:41.000Z
|
#Original code from: https://github.com/jeonsworld/ViT-pytorch/blob/main/models/configs.py
import ml_collections
def get_testing():
"""Returns a minimal configuration for testing."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 1
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 1
config.transformer.num_heads = 1
config.transformer.num_layers = 1
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
def get_b16_config():
"""Returns the ViT-B/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 768
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 3072
config.transformer.num_heads = 12
config.transformer.num_layers = 12
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
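# A small usage sketch (illustrative only): every helper returns a plain
# ml_collections.ConfigDict, so hyper-parameters are read as attributes, e.g.
#   cfg = get_b16_config()
#   cfg.hidden_size              # -> 768
#   cfg.transformer.num_heads    # -> 12
#   cfg.patches.size             # -> (16, 16)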
def get_r50_b16_config():
"""Returns the Resnet50 + ViT-B/16 configuration."""
config = get_b16_config()
del config.patches.size
config.patches.grid = (14, 14)
config.resnet = ml_collections.ConfigDict()
config.resnet.num_layers = (3, 4, 9)
config.resnet.width_factor = 1
return config
def get_b32_config():
"""Returns the ViT-B/32 configuration."""
config = get_b16_config()
config.patches.size = (32, 32)
return config
def get_l16_config():
"""Returns the ViT-L/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 1024
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 4096
config.transformer.num_heads = 16
config.transformer.num_layers = 24
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
def get_l32_config():
"""Returns the ViT-L/32 configuration."""
config = get_l16_config()
config.patches.size = (32, 32)
return config
def get_h14_config():
    """Returns the ViT-H/14 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (14, 14)})
config.hidden_size = 1280
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 5120
config.transformer.num_heads = 16
config.transformer.num_layers = 32
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
| 33.25
| 91
| 0.690749
|
232ae95c9ceab55fd3915149ed5dd8892c51c6f9
| 683
|
py
|
Python
|
tictactoe/use_cases/play.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | 1
|
2020-09-30T21:03:37.000Z
|
2020-09-30T21:03:37.000Z
|
tictactoe/use_cases/play.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | null | null | null |
tictactoe/use_cases/play.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | null | null | null |
from gloop.entities.player import Player
from tictactoe.adapters.client_channel import TicTacToeClientChannel as Client
from tictactoe.entities.match import TicTacToeMatch
from tictactoe.repositories.match import TicTacToeMatchRepository
class Play:
def __init__(self, client: Client, matches: TicTacToeMatchRepository):
self.client = client
self.matches = matches
async def execute(self, player: Player, match: TicTacToeMatch):
possible_moves = match.get_possible_moves()
move = await self.client.request_move(possible_moves)
move.apply(player, match)
await self.matches.insert_match(match)
match.yield_priority()
| 35.947368
| 78
| 0.756955
|
5d2ed891ad0518665fb95d0061ce1148c9b19428
| 816
|
py
|
Python
|
src/yoloannotator/image.py
|
s1n7ax/partially-annotator
|
3bc53e3cbdd49dab871bd9fbbf59eabfb90d31c5
|
[
"MIT"
] | null | null | null |
src/yoloannotator/image.py
|
s1n7ax/partially-annotator
|
3bc53e3cbdd49dab871bd9fbbf59eabfb90d31c5
|
[
"MIT"
] | null | null | null |
src/yoloannotator/image.py
|
s1n7ax/partially-annotator
|
3bc53e3cbdd49dab871bd9fbbf59eabfb90d31c5
|
[
"MIT"
] | null | null | null |
import cv2
class Image:
def __init__(self, img_path):
self.img_path = img_path
self.img = None
def get_path(self):
return self.img_path
def get_size(self):
h, w, _ = self.get_image().shape
return [w, h]
def get_blob(self, blob_width=None, blob_height=None):
img = self.get_image()
if blob_width is None:
blob_width = img.shape[1]
if blob_height is None:
blob_height = img.shape[0]
return cv2.dnn.blobFromImage(
            img, 1/255,  # scale pixel values to [0, 1]
(blob_width, blob_height),
[0, 0, 0], 1,
crop=False
)
def get_image(self):
if self.img is None:
print(self.img_path)
self.img = cv2.imread(self.img_path)
return self.img
| 20.923077
| 58
| 0.540441
|
3ede22ffbf5665671022fa2377fb08701d029bb9
| 3,938
|
py
|
Python
|
utils/plot_confusion_matrix.py
|
tinvukhac/learned-spatial-join
|
d52967fbfd506829bdb92719dc80b042a8119b7d
|
[
"Apache-2.0"
] | 6
|
2021-12-17T22:19:25.000Z
|
2022-03-17T23:35:04.000Z
|
utils/plot_confusion_matrix.py
|
tinvukhac/sjml-resources
|
f5805f726ccfa628c2dc20ad42eb8262f80bee94
|
[
"Apache-2.0"
] | null | null | null |
utils/plot_confusion_matrix.py
|
tinvukhac/sjml-resources
|
f5805f726ccfa628c2dc20ad42eb8262f80bee94
|
[
"Apache-2.0"
] | 2
|
2021-01-26T04:17:43.000Z
|
2021-02-16T16:10:13.000Z
|
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import itertools
from mpl_toolkits.axes_grid1 import ImageGrid
def plot_two_matrices(confusion_matrix_values, titles):
classes = ['BNLJ', 'PBSM', 'DJ', 'RepJ']
fig = plt.figure()
grid = ImageGrid(fig, 111, # as in plt.subplot(111)
nrows_ncols=(1, 2),
axes_pad=0.15,
cbar_location="right",
cbar_mode="single",
cbar_size="7%",
cbar_pad=0.15,
)
for n, ax in enumerate(grid[:2]):
# cm = np.random.random((2, 2))
cm = confusion_matrix_values[n]
im = ax.imshow(cm, vmin=0, vmax=1, cmap=plt.cm.Blues)
ax.set_title("{}".format(titles[n])) # ax.___ instead of plt.___
tick_marks = np.arange(4)
ax.set_xticks(tick_marks) # Warning: different signature for [x|y]ticks in pyplot and OO interface
ax.set_xticklabels(classes, rotation=0)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], '.3f'),
horizontalalignment="center",
color="black")
if confusion_matrix_values[n][i][j] > 0.7:
ax.text(j, i, format(cm[i, j], '.3f'),
horizontalalignment="center",
color="white")
ax.set_ylabel('Actual best algorithm')
ax.set_xlabel('Predicted algorithm')
# fig.tight_layout()
fig.subplots_adjust(right=0.8)
fig.colorbar(im, cax=ax.cax)
fig.savefig('../figures/confusion_matrix.png', bbox_inches='tight')
plt.show()
def main():
print('Plot confusion matrix')
    # Note: somehow you need to run this file from the terminal.
    # I always get a FileNotFoundError exception even when the file path is correct
# Remove empty lines from Alberto's data
# f = open('../data/temp/algorithm_selection_b3_updated_5_31.alberto.csv')
# output_f = open('../data/temp/algorithm_selection_b3_updated_5_31.csv', 'w')
#
# lines = f.readlines()
#
# for line in lines:
# if len(line.strip()) > 0:
# output_f.writelines('{}\n'.format(line.strip()))
#
# output_f.close()
# f.close()
# Plot confusion matrix
# df = pd.read_csv('../data/temp/algorithm_selection_b3_updated_5_31.csv', header=0)
# y_test = df['y_test']
# y_pred = df['y_pred']
# cm = confusion_matrix(y_test, y_pred)
#
# class_names = ['BNLJ', 'PBSM', 'DJ', 'RepJ']
# cm = confusion_matrix(y_test, y_pred, labels=[1, 2, 3, 4], normalize='true')
# print(cm)
# disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=class_names)
# disp.plot(cmap=plt.cm.Blues)
# plt.xlabel('Predicted algorithm', fontsize=16)
# plt.ylabel('Actual best algorithm', fontsize=16)
# plt.savefig('../figures/confusion_matrix_with_normalization_b3.png')
confusion_matrix_values = []
# Compute fist confusion matrix
df = pd.read_csv('../data/temp/algorithm_selection_b3_updated_5_31.csv', header=0)
y_test = df['y_test']
y_pred = df['y_pred']
confusion_matrix_values.append(confusion_matrix(y_test, y_pred, labels=[1, 2, 3, 4], normalize='true'))
# Compute second confusion matrix
df = pd.read_csv('../data/temp/algorithm_selection_m3_fs3_v3.csv', header=0)
y_test = df['y_test']
y_pred = df['y_pred']
confusion_matrix_values.append(confusion_matrix(y_test, y_pred, labels=[1, 2, 3, 4], normalize='true'))
SUB = str.maketrans("0123456789", "₀₁₂₃₄₅₆₇₈₉")
titles = ['B3{}'.format('2'.translate(SUB)), 'M3']
plot_two_matrices(confusion_matrix_values, titles)
if __name__ == '__main__':
main()
| 35.477477
| 107
| 0.620366
|
ffb02e8120ed999cc83374795d167a341db659d3
| 18,007
|
py
|
Python
|
model.py
|
dingyunxing/James-code-paradise
|
688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a
|
[
"MIT"
] | null | null | null |
model.py
|
dingyunxing/James-code-paradise
|
688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a
|
[
"MIT"
] | null | null | null |
model.py
|
dingyunxing/James-code-paradise
|
688a8e6c8b569bacb6ac9f6754f43a5a3a7eba7a
|
[
"MIT"
] | null | null | null |
'''This program aims to build a multiple regression model
based on a given CSV file.
The whole project is implemented with a top-down design.
It includes the model construction and the prediction.
Main functions include:
1. Open a csv file and read it; all variables should be int or float.
2. Define one column as the response variable and one or more others as control
variables.
3. Use the ordinary least squares algorithm to find the optimal solution.
4. Validate the model with cross-validation.
5. Predict the response variable using the optimal solution.
Author: Yunxing Ding
'''
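# A brief ordinary-least-squares sketch (illustrative only, not part of the
# program flow below): with a design matrix X that carries a leading column of
# ones and a response vector y, the OLS coefficients minimise ||X @ beta - y||^2,
# e.g. with numpy:
#   X = np.array([[1, 1.0], [1, 2.0], [1, 3.0]])
#   y = np.array([2.0, 4.1, 5.9])
#   beta, *_ = np.linalg.lstsq(X, y, rcond=None)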
import numpy as np
import random
import time
from gradient_descent import GradientDescent
# Define an very large error, used for initial difference between errors
ERROR = 999999999999
# **********************************************************************
#
# Define all the functions that manipulate the file input.
# The dataframe should be constructed with these functions.
# These functions don't "know" anything about response or control variables.
# An entrance function at the end ties all these functions together.
#
# **********************************************************************
def read_data(filename):
'''a function to read file and return a list of data'''
datafile = open(filename)
data = datafile.readlines()
datafile.close()
return data
def open_file():
    """a function that opens the file"""
opened = False
while not opened:
filename = input('Please input the name of file:(*.csv) ')
try:
file = read_data(filename)
opened = True
except FileNotFoundError:
print("Can't find the file! Don't fool me!")
opened = False
return file
def process_data(data):
    '''a function to process the data and return a nested list'''
list_content = []
for line in data:
columns = line.strip().split(',')
list_content.append(columns)
return list_content
def print_file_head(file, n=5):
'''display the file head, default is first 5 line'''
for item in file[:n]:
print(item, end='\n')
def entrance():
    '''an entrance function that leads the user into the program and
    shows the dataframe'''
print("Welcome to our magic world!^_^" + "\n")
dataframe = process_data(open_file())
times = int\
(input("How many rows do you want the dataframe to show?(at least 2) "))
print_file_head(dataframe, times)
return dataframe
# **********************************************************************
#
# Now we have some functions that split the data with "columns" and "rows".
# Both response and control variables are defined and the data are selected.
# Train, test and validate sets are split.
# They do not "know" about the cross-validation and OLS algorithm.
#
# **********************************************************************
def get_y_num(dataframe, y_variable):
    '''a function that returns the column index of the y variable'''
for num in range(len(dataframe[0])):
if dataframe[0][num] == y_variable:
n = num
return n
def get_y_matrix(dataframe, n):
'''a function to get the y list'''
y_list = []
for i in range(1, len(dataframe)):
y_list.append(float(dataframe[i][n]))
return y_list
def y_input_operate(dataframe):
    '''a function to receive the y_variable input'''
value = False
while not value:
y_variable = input("Please input the name of response variable: ")
if y_variable in dataframe[0]:
n = get_y_num(dataframe, y_variable)
y_matrix = get_y_matrix(dataframe, n)
value = True
else:
print("please input the right name!")
value = False
return y_matrix, y_variable
def get_x_variable(dataframe, x_variable):
    '''a function to get the index of an x column'''
for num in range(len(dataframe[0])):
if dataframe[0][num] == x_variable:
n = num
return n
def x_variable_or_not():
    '''function that returns True if the user does not want to add another
    control variable, or False if the user wants to add more
    '''
input_x = False
while not input_x:
back = input\
("Do you want to continue to enter one more control variable? (y/n)")
if back == "y":
break
elif back == "n":
input_x = True
else:
print("Sorry, I don't understand what do you want.")
input_x = False
return input_x
def x_num_list_generate(dataframe):
    '''a function to receive the x variable input'''
x_num_list = []
x_name_list = []
lenth = len(dataframe[0])
    # judge is True when no more control variables should be added
    # judge is False when another control variable should be added
judge = False
    while lenth > 1 and not judge:  # one column is reserved for the y variable
print("\n")
x_variable = input("Please input the name of control vairalbe: ")
if x_variable not in dataframe[0]:
print("Please input the right name!")
else:
if x_variable not in x_name_list:
n = get_x_variable(dataframe, x_variable)
x_num_list.append(n)
x_name_list.append(x_variable)
lenth -= 1
judge = x_variable_or_not()
else:
print("You have added this control varialbe, don't input again")
return x_num_list, x_name_list
def get_x_matrix(dataframe, x_num_list):
    '''a function to get the final whole x matrix'''
x_matrix = []
for i in range(1, len(dataframe)):
# add 1 to each row of the dataset to generate a constant column
x_list = [1]
for j in x_num_list:
x_list.append(float(dataframe[i][j]))
x_matrix.append(x_list)
return x_matrix
def remove_index(dataframe, index_test):
    '''a function to remove the indices of the test set after it is split.
    This avoids overlap between the test set and the validate set
    '''
lenth = len(dataframe) - 1
whole_list = list(range(lenth))
for i in index_test:
whole_list.remove(i)
return whole_list
def dataset_split(dataframe, random_test=0.2, random_validate=0.1):
    '''a function that splits the data frame into training set, test set and
    validate set, and returns their row indices:
    Training set is used to train the model
    Test set is used to test performance
    Validate set is used to check whether overfitting exists
    The default proportion is 7:2:1
    '''
whole_lenth = len(dataframe) - 1
test_index = random.sample(range(whole_lenth),\
round(whole_lenth*random_test))
rest_index = remove_index(dataframe, test_index)
rest_lenth = len(rest_index)
    # sample the validate rows from the remaining (non-test) rows
    validate_index = random.sample(rest_index,
                                   round(rest_lenth*random_validate))
list_train = []
list_test = []
list_validate = []
for item in range(whole_lenth):
if item in test_index:
list_test.append(item)
elif item in validate_index:
list_validate.append(item)
else:
list_train.append(item)
    return list_train, list_test, list_validate
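# --- Illustrative sketch (not part of the original program) ------------------
# A minimal, hypothetical sanity check for dataset_split(): it builds a tiny
# fake dataframe (a header row plus ten data rows) and verifies that the three
# returned index lists partition the data rows with no overlap between the
# test and validate sets. It is defined for illustration only and never called.
def _dataset_split_demo():
    '''illustrative only: dataset_split should partition the row indices'''
    fake_dataframe = [['y', 'x']] + [[str(i), str(i * 2)] for i in range(10)]
    train, test, validate = dataset_split(fake_dataframe)
    assert sorted(train + test + validate) == list(range(len(fake_dataframe) - 1))
    assert not set(test) & set(validate)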
def proportion_judge():
    '''a function that validates the proportion input and
    returns the proportions for the test and validation sets respectively
    '''
f = False
while not f:
p1 = float(input("Please set the test proportion(0-0.5): "))
p2 = float(input\
("Please set the validate proportion(0-0.5)." +"\n" +\
"(Notice:validate proportion usually should be smaller than 0.2): "))
if p1 <= 0 or p2 <= 0 or (p1 + p2) >= 1:
print("Invalid input! What are you doing?!")
f = False
else:
f = True
return p1, p2
def set_proportion(dataframe):
    '''a function that returns the row indices of the training set, the test
    set and the validation set based on the user's input
    '''
print\
("The default proportion of train set, test set and validate set is 7:2:1.")
print("\n")
setup = False
while not setup:
x = input("Do you want to set the proportion yourself?(y/n) ")
if x == 'y':
p1, p2 = proportion_judge()
serial_train = dataset_split(dataframe, p1, p2)[0]
serial_test = dataset_split(dataframe, p1, p2)[1]
serial_validate = dataset_split(dataframe, p1, p2)[2]
setup = True
elif x == 'n':
print('You are too lazy!')
print('Ok, I will do it by default')
serial_train = dataset_split(dataframe)[0]
serial_test = dataset_split(dataframe)[1]
serial_validate = dataset_split(dataframe)[2]
setup = True
else:
print('Invalid input! Please input it again.(y/n)')
setup = False
return serial_train, serial_test, serial_validate
def y_split_matrix(y_matrix, serial):
    '''a function that splits the y matrix according to the given serial:
    when serial holds the train indices, y_split is the train set
    when serial holds the test indices, y_split is the test set
    when serial holds the validation indices, y_split is the validation set
    '''
y_split = []
for i in serial:
y_split.append(y_matrix[i])
return np.array(y_split)
def x_split_matrix(x_matrix, serial):
    '''a function that splits the x matrix according to the given serial:
    when serial holds the train indices, x_split is the train set
    when serial holds the test indices, x_split is the test set
    when serial holds the validation indices, x_split is the validation set
    '''
x_split = []
for i in serial:
x_split.append(x_matrix[i])
return np.array(x_split)
def control_varialbe_show(x_num_list, x_name_list):
    '''a function that lists the control variables'''
print("\n")
print\
("You have added {} control variables in total".format(len(x_num_list)))
print("They are {}".format(x_name_list))
def data_split_process(dataframe, y_matrix, x_matrix):
    '''a function that splits the data into the different data sets
    and returns the training matrices'''
    serial_train, serial_test, serial_validate = set_proportion(dataframe)
    x = x_split_matrix(x_matrix, serial_train)
    y = y_split_matrix(y_matrix, serial_train)
    return x, y
# **********************************************************************
#
# Now these functions are about the algorithm, OLS on cross-validation
# With these functions, the final coefficients will be worked out
#
# **********************************************************************
def OLS(y, x):
    '''a function to calculate the coefficient of each feature;
    returns beta, a matrix of the coefficients of all the control variables
    '''
x_trans = x.T
x_square_mat = x_trans.dot(x)
x_trans_y = x_trans.dot(y)
    # formula: beta = (X.T * X)^-1 * X.T * Y
beta = (np.mat(x_square_mat).I).dot(x_trans_y)
return beta
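# --- Illustrative sketch (not part of the original program) ------------------
# A minimal, hypothetical check of the normal-equation solution above: fit
# OLS() on noise-free synthetic data generated from known coefficients and
# confirm that the estimates are recovered. Defined for illustration only and
# never called by the program.
def _ols_demo():
    '''illustrative only: OLS should recover the coefficients of y = 1 + 2*x'''
    x = np.array([[1.0, float(i)] for i in range(10)])  # constant column + x
    y = np.array([1.0 + 2.0 * i for i in range(10)])
    beta = np.asarray(OLS(y, x)).flatten()
    assert np.allclose(beta, [1.0, 2.0])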
def test_diff(y, x, y_test, x_test):
    '''a function to calculate the squared error on the test set;
    returns a tuple of (difference, coefficients)
    '''
if alg == "O":
coeff = OLS(y, x)
else:
coeff = GradientDescent(y, x)
diff = 0
for i in range(len(y_test)):
diff += (y_test[i] - (coeff.dot(x_test[i].T))) ** 2
return (diff, coeff)
def test_times(dataframe, y_matrix, x_matrix, n=10):
    '''a function that calculates the best coefficients and returns them with
    the error. The default number of rounds is 10
    '''
error1 = ERROR
coef1 = []
while n > 0:
# set the train and test serial
serial_train = dataset_split(dataframe)[0]
serial_test = dataset_split(dataframe)[1]
# set the training and test set for x and y matrix
x = x_split_matrix(x_matrix, serial_train)
y = y_split_matrix(y_matrix, serial_train)
x_test = x_split_matrix(x_matrix, serial_test)
y_test = y_split_matrix(y_matrix, serial_test)
error2, coef2 = test_diff(y, x, y_test, x_test)
        # update the error and coefficients when this round improves on the best
        if error2 < error1:
            error1 = error2
            coef1 = coef2
        # count the round either way so that exactly n rounds are performed
        n -= 1
    return coef1, error1
def validate_best(dataframe, y_matrix, x_matrix):
    '''a function that evaluates the squared error on the validate set
    and returns that difference
    '''
# set the validate serial
serial_validate = dataset_split(dataframe)[2]
# set the validate set for x and y matrix
x = x_split_matrix(x_matrix, serial_validate)
y = y_split_matrix(y_matrix, serial_validate)
    # initialize the squared difference between real y and estimated y to 0
diff = 0
coeff = test_times(dataframe, y_matrix, x_matrix)[0]
for i in range(len(y)):
diff += (y[i] - (coeff.dot(x[i].T))) ** 2
return diff
def count_time(m):
'''a function to display the effect of countdown'''
count = 0
while (count < m):
count += 1
n = m - count
time.sleep(1)
print(n + 1, "times left")
def satisfy_or_not(dataframe, y_matrix, x_matrix, coef1, sst1, error1, n=10):
    '''a function that checks whether the user is satisfied with the total sum of squares'''
satisfy = False
coef = coef1
sst = sst1
error = error1
while not satisfy:
judge = input("Do you satisfiy to the validation result? (y/n) ")
if judge == "n":
print('*' * 80)
coef1, error1 = test_times(dataframe, y_matrix, x_matrix, n)
sst = validate_best(dataframe, y_matrix, x_matrix)
count_time(n)
print("The updated SST on test set is:", error1)
print("The updated SST on validation set is:", sst)
satisfy = False
elif judge == "y":
print('*' * 80)
satisfy = True
else:
print("The input is invalid, please input again. (y/n)")
satisfy = False
print("The final best coefficient matrix after {} times validation is:"\
.format(n) + '\n', coef1)
print("The SST on validation set is", sst)
return coef1
def cross_validation(dataframe, y_matrix, x_matrix):
    '''a function that executes the cross-validation process'''
n = int\
(input("How many times do you want to do the cross validation? (1-50)"))
count_time(n)
coef1, error1 = test_times(dataframe, y_matrix, x_matrix, n)
sst1 = validate_best(dataframe, y_matrix, x_matrix)
print("The final best coefficient matrix after {} times validation is:"\
.format(n) + '\n', coef1)
print()
print("*" * 80)
print("The SST on test set is:", error1)
print("The SST on validation set is:", sst1)
return coef1, sst1, error1, n
# **********************************************************************
#
# Here the last functions are choosing prediction or quit the program.
#
# **********************************************************************
def get_predict_x(dataframe, x_name_list):
    '''a function that collects the control variable values used for prediction'''
list_predict_x = [1]
list_name = x_name_list
x = 0
for i in list_name:
x = float(input("Please input the value of {}: ".format(i)))
list_predict_x.append(x)
return list_predict_x
def quit_or_predict(dataframe, x_name_list, y_variable, c):
    '''a function that asks whether to continue predicting or quit'''
quit = False
while not quit:
a = input\
("Do you want to continue prediction or quit the program?(continue/quit) ")
if a == 'continue':
print('\n' * 2)
list1 = get_predict_x(dataframe, x_name_list)
predict_y = c.dot(np.array(list1).reshape([len(list1), 1]))
print("the final predict {} value is:".\
format(y_variable), predict_y)
quit = False
elif a == 'quit':
print('*' * 80)
print("Thank you for using this amazing program!!!")
print("See you next time!")
quit = True
else:
print("The input is invalid, please input again!(continue/quit)")
quit = False
def predict(dataframe, x_name_list, y_variable, c):
    '''a function that executes the sequence of actions needed to predict values'''
print('*' * 80)
print("Now we got the model, we can begin to predict!!!")
print('\n' * 2)
list_input = get_predict_x(dataframe, x_name_list)
predict_y = c.dot(np.array(list_input).reshape([len(list_input), 1]))
print('*' * 80)
print("the final predict {} value is:".format(y_variable), predict_y)
quit_or_predict(dataframe, x_name_list, y_variable, c)
# **********************************************************************
#
# Now at last the main function and the call to it
#
# **********************************************************************
if __name__ == '__main__':
dataframe = entrance() # read the file and display the dataframe
    # Define x and y variables and the corresponding data matrices
y_matrix, y_variable = y_input_operate(dataframe)
print("You have selected {} as the responce varialbe".format(y_variable))
    # 'alg' is defined at module level here and read by test_diff()
    alg = input("OLS or GradientDescent? (O/G) ")
x_num_list, x_name_list = x_num_list_generate(dataframe)
x_matrix = get_x_matrix(dataframe, x_num_list)
control_varialbe_show(x_num_list, x_name_list)
data_split_process(dataframe, y_matrix, x_matrix)
coef1, sst1, error1, n = cross_validation(dataframe, y_matrix, x_matrix)
# c is the final coefficient matrix can be used in prediction
c = satisfy_or_not(dataframe, y_matrix, x_matrix, coef1, sst1, error1, n)
print("Control varialbes are {}".format(x_name_list))
# predict or quit
predict(dataframe, x_name_list, y_variable, c)
| 34.762548
| 84
| 0.605376
|
9c5c43ab4a24df83bfadd5744e46aafdb6262f0b
| 10,125
|
py
|
Python
|
hybrid/battery.py
|
NREL/HOPP
|
824334df055d897d38c055e8b9197f478bac2cb6
|
[
"BSD-3-Clause"
] | 3
|
2021-03-10T20:03:42.000Z
|
2022-03-18T17:10:04.000Z
|
hybrid/battery.py
|
NREL/HOPP
|
824334df055d897d38c055e8b9197f478bac2cb6
|
[
"BSD-3-Clause"
] | 14
|
2020-12-28T22:32:07.000Z
|
2022-03-17T15:33:04.000Z
|
hybrid/battery.py
|
NREL/HOPP
|
824334df055d897d38c055e8b9197f478bac2cb6
|
[
"BSD-3-Clause"
] | 8
|
2021-01-19T02:39:01.000Z
|
2022-01-31T18:04:39.000Z
|
from typing import Sequence
import PySAM.BatteryStateful as BatteryModel
import PySAM.BatteryTools as BatteryTools
import PySAM.Singleowner as Singleowner
from hybrid.power_source import *
class Battery_Outputs:
def __init__(self, n_timesteps):
""" Class of stateful battery outputs
"""
self.stateful_attributes = ['I', 'P', 'Q', 'SOC', 'T_batt', 'gen']
for attr in self.stateful_attributes:
setattr(self, attr, [0.0]*n_timesteps)
# dispatch output storage
dispatch_attributes = ['I', 'P', 'SOC']
for attr in dispatch_attributes:
setattr(self, 'dispatch_'+attr, [0.0]*n_timesteps)
class Battery(PowerSource):
_system_model: BatteryModel.BatteryStateful
_financial_model: Singleowner.Singleowner
module_specs = {'capacity': 400, 'surface_area': 30} # 400 [kWh] -> 30 [m^2]
def __init__(self,
site: SiteInfo,
battery_config: dict,
chemistry: str = 'lfpgraphite',
system_voltage_volts: float = 500):
"""
:param battery_config: dict, with keys ('system_capacity_kwh', 'system_capacity_kw')
:param chemistry:
:param system_voltage_volts:
"""
for key in ('system_capacity_kwh', 'system_capacity_kw'):
if key not in battery_config.keys():
raise ValueError
system_model = BatteryModel.default(chemistry)
self.system_capacity_kw: float = battery_config['system_capacity_kw']
financial_model = Singleowner.from_existing(system_model, "GenericBatterySingleOwner")
super().__init__("Battery", site, system_model, financial_model)
self.Outputs = Battery_Outputs(n_timesteps=site.n_timesteps)
self.chemistry = chemistry
BatteryTools.battery_model_sizing(self._system_model,
battery_config['system_capacity_kw'],
battery_config['system_capacity_kwh'],
system_voltage_volts,
module_specs=Battery.module_specs)
self._system_model.ParamsPack.h = 20
self._system_model.ParamsPack.Cp = 900
self._system_model.ParamsCell.resistance = 0.001
# Minimum set of parameters to set to get statefulBattery to work
self._system_model.value("control_mode", 0.0)
self._system_model.value("input_current", 0.0)
self._system_model.value("dt_hr", 1.0)
self._system_model.value("minimum_SOC", 10.0)
self._system_model.value("maximum_SOC", 90.0)
self._system_model.value("initial_SOC", 10.0)
self._dispatch = None # TODO: this could be the union of the models
logger.info("Initialized battery with parameters and state {}".format(self._system_model.export()))
@property
def system_capacity_voltage(self) -> tuple:
return self._system_model.ParamsPack.nominal_energy, self._system_model.ParamsPack.nominal_voltage
@system_capacity_voltage.setter
def system_capacity_voltage(self, capacity_voltage: tuple):
"""
Sets the system capacity and voltage, and updates the system, cost and financial model
:param capacity_voltage:
:return:
"""
size_kwh = capacity_voltage[0]
voltage_volts = capacity_voltage[1]
# sizing function may run into future issues if size_kwh == 0 is allowed
if size_kwh == 0:
size_kwh = 1e-7
BatteryTools.battery_model_sizing(self._system_model,
0.,
size_kwh,
voltage_volts,
module_specs=Battery.module_specs)
logger.info("Battery set system_capacity to {} kWh".format(size_kwh))
logger.info("Battery set system_voltage to {} volts".format(voltage_volts))
@property
def system_capacity_kwh(self) -> float:
return self._system_model.ParamsPack.nominal_energy
@system_capacity_kwh.setter
def system_capacity_kwh(self, size_kwh: float):
"""
Sets the system capacity and updates the system, cost and financial model
:param size_kwh:
"""
self.system_capacity_voltage = (size_kwh, self.system_voltage_volts)
@property
def system_capacity_kw(self) -> float:
return self._system_capacity_kw
@system_capacity_kw.setter
def system_capacity_kw(self, size_kw: float):
"""
Sets the system capacity and updates the system, cost and financial model
:param size_kw:
"""
# TODO: update financial model?
self._system_capacity_kw = size_kw
@property
def system_voltage_volts(self) -> float:
return self._system_model.ParamsPack.nominal_voltage
@system_voltage_volts.setter
def system_voltage_volts(self, voltage_volts: float):
"""
Sets the system voltage and updates the system, cost and financial model
:param voltage_volts:
:return:
"""
self.system_capacity_voltage = (self.system_capacity_kwh, voltage_volts)
@property
def chemistry(self) -> str:
model_type = self._system_model.ParamsCell.chem
if model_type == 0 or model_type == 1:
return self._chemistry
else:
raise ValueError("chemistry model type unrecognized")
@chemistry.setter
def chemistry(self, battery_chemistry: str):
"""
Sets the system chemistry and updates the system, cost and financial model
:param battery_chemistry:
:return:
"""
BatteryTools.battery_model_change_chemistry(self._system_model, battery_chemistry)
self._chemistry = battery_chemistry
logger.info("Battery chemistry set to {}".format(battery_chemistry))
def _simulate_with_dispatch(self, n_periods: int, sim_start_time: int = None):
"""
Step through dispatch solution for battery and simulate battery
"""
# TODO: This is specific to the Stateful battery model
# Set stateful control value [Discharging (+) + Charging (-)]
if self.value("control_mode") == 1.0:
control = [pow_MW*1e3 for pow_MW in self.dispatch.power] # MW -> kW
elif self.value("control_mode") == 0.0:
control = [cur_MA * 1e6 for cur_MA in self.dispatch.current] # MA -> A
else:
raise ValueError("Stateful battery module 'control_mode' invalid value.")
time_step_duration = self.dispatch.time_duration
for t in range(n_periods):
self.value('dt_hr', time_step_duration[t])
self.value(self.dispatch.control_variable, control[t])
# Only store information if passed the previous day simulations (used in clustering)
try:
index_time_step = sim_start_time + t # Store information
except TypeError:
index_time_step = None # Don't store information
self.simulate(time_step=index_time_step)
# Store Dispatch model values
if sim_start_time is not None:
time_slice = slice(sim_start_time, sim_start_time + n_periods)
self.Outputs.dispatch_SOC[time_slice] = self.dispatch.soc[0:n_periods]
self.Outputs.dispatch_P[time_slice] = self.dispatch.power[0:n_periods]
self.Outputs.dispatch_I[time_slice] = self.dispatch.current[0:n_periods]
# logger.info("Battery Outputs at start time {}".format(sim_start_time, self.Outputs))
def simulate(self, time_step=None):
"""
        Runs the battery simulation and stores values if a time step is provided
"""
if not self._system_model:
return
self._system_model.execute(0)
if time_step is not None:
self.update_battery_stored_values(time_step)
# TODO: Do we need to update financial model after battery simulation is complete?
def update_battery_stored_values(self, time_step):
# Physical model values
for attr in self.Outputs.stateful_attributes:
if hasattr(self._system_model.StatePack, attr):
getattr(self.Outputs, attr)[time_step] = self.value(attr)
else:
if attr == 'gen':
getattr(self.Outputs, attr)[time_step] = self.value('P')
def simulate_financials(self, project_life):
# TODO: updated replacement values -> based on usage...
try:
self._financial_model.BatterySystem.batt_bank_replacement
except:
self._financial_model.BatterySystem.batt_bank_replacement = [0] * (project_life + 1)
if project_life > 1:
self._financial_model.Lifetime.system_use_lifetime_output = 1
else:
self._financial_model.Lifetime.system_use_lifetime_output = 0
self._financial_model.FinancialParameters.analysis_period = project_life
self._financial_model.value("construction_financing_cost", self.get_construction_financing_cost())
self._financial_model.Revenue.ppa_soln_mode = 1
        # TODO: figure out how to get SystemOutput.gen to populate
# if len(self._financial_model.SystemOutput.gen) == self.site.n_timesteps:
if len(self.Outputs.gen) == self.site.n_timesteps:
single_year_gen = self.Outputs.gen
self._financial_model.SystemOutput.gen = list(single_year_gen) * project_life
self._financial_model.SystemOutput.system_pre_curtailment_kwac = list(single_year_gen) * project_life
self._financial_model.SystemOutput.annual_energy_pre_curtailment_ac = sum(single_year_gen)
self._financial_model.execute(0)
logger.info("{} simulation executed".format('battery'))
@property
def generation_profile(self) -> Sequence:
if self.system_capacity_kwh:
return self.Outputs.gen
else:
return [0] * self.site.n_timesteps
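# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical example of constructing a Battery from a config dict, as
# described in __init__ above. `site` is assumed to be an existing SiteInfo
# instance, and the capacity numbers are made up for illustration; the function
# is never called here.
def _example_battery(site: SiteInfo) -> "Battery":
    battery_config = {
        'system_capacity_kwh': 200.0,  # assumed usable energy capacity [kWh]
        'system_capacity_kw': 50.0,    # assumed power rating [kW]
    }
    return Battery(site, battery_config, chemistry='lfpgraphite')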
| 41.158537
| 113
| 0.645827
|
ac5016e29213301a21d414af36f73a192e757f39
| 17,946
|
py
|
Python
|
zerver/tests/test_message_edit_notifications.py
|
bongjlee/zulip
|
dc95d6e5ca320a241b569b43ca970196953c73d4
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_message_edit_notifications.py
|
bongjlee/zulip
|
dc95d6e5ca320a241b569b43ca970196953c73d4
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_message_edit_notifications.py
|
bongjlee/zulip
|
dc95d6e5ca320a241b569b43ca970196953c73d4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Any, Dict, Mapping, Union
import mock
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
get_client,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
get_stream_recipient,
Subscription,
UserPresence,
)
from zerver.tornado.event_queue import (
maybe_enqueue_notifications,
)
class EditMessageSideEffectsTest(ZulipTestCase):
def _assert_update_does_not_notify_anybody(self, message_id: int, content: str) -> None:
url = '/json/messages/' + str(message_id)
request = dict(
message_id=message_id,
content=content,
)
with mock.patch('zerver.tornado.event_queue.maybe_enqueue_notifications') as m:
result = self.client_patch(url, request)
self.assert_json_success(result)
self.assertFalse(m.called)
def test_updates_with_pm_mention(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
self.login(hamlet.email)
message_id = self.send_personal_message(
hamlet.email,
cordelia.email,
content='no mention'
)
self._assert_update_does_not_notify_anybody(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
def _login_and_send_original_stream_message(self, content: str) -> int:
'''
Note our conventions here:
Hamlet is our logged in user (and sender).
Cordelia is the receiver we care about.
Scotland is the stream we send messages to.
'''
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
self.login(hamlet.email)
self.subscribe(hamlet, 'Scotland')
self.subscribe(cordelia, 'Scotland')
message_id = self.send_stream_message(
hamlet.email,
'Scotland',
content=content,
)
return message_id
def _get_queued_data_for_message_update(self, message_id: int, content: str,
expect_short_circuit: bool=False) -> Dict[str, Any]:
'''
This function updates a message with a post to
/json/messages/(message_id).
By using mocks, we are able to capture two pieces of data:
enqueue_kwargs: These are the arguments passed in to
maybe_enqueue_notifications.
queue_messages: These are the messages that
maybe_enqueue_notifications actually
puts on the queue.
Using this helper allows you to construct a test that goes
pretty deep into the missed-messages codepath, without actually
queuing the final messages.
'''
url = '/json/messages/' + str(message_id)
request = dict(
message_id=message_id,
content=content,
)
with mock.patch('zerver.tornado.event_queue.maybe_enqueue_notifications') as m:
result = self.client_patch(url, request)
cordelia = self.example_user('cordelia')
cordelia_calls = [
call_args
for call_args in m.call_args_list
if call_args[1]['user_profile_id'] == cordelia.id
]
if expect_short_circuit:
self.assertEqual(len(cordelia_calls), 0)
return {}
# Normally we expect maybe_enqueue_notifications to be
# called for Cordelia, so continue on.
self.assertEqual(len(cordelia_calls), 1)
enqueue_kwargs = cordelia_calls[0][1]
queue_messages = []
def fake_publish(queue_name: str,
event: Union[Mapping[str, Any], str],
*args: Any) -> None:
queue_messages.append(dict(
queue_name=queue_name,
event=event,
))
with mock.patch('zerver.tornado.event_queue.queue_json_publish') as m:
m.side_effect = fake_publish
maybe_enqueue_notifications(**enqueue_kwargs)
self.assert_json_success(result)
return dict(
enqueue_kwargs=enqueue_kwargs,
queue_messages=queue_messages
)
def test_updates_with_stream_mention(self) -> None:
message_id = self._login_and_send_original_stream_message(
content='no mention',
)
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
cordelia = self.example_user('cordelia')
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
wildcard_mention_notify=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=True,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
self.assertEqual(len(queue_messages), 2)
self.assertEqual(queue_messages[0]['queue_name'], 'missedmessage_mobile_notifications')
mobile_event = queue_messages[0]['event']
self.assertEqual(mobile_event['user_profile_id'], cordelia.id)
self.assertEqual(mobile_event['trigger'], 'mentioned')
self.assertEqual(queue_messages[1]['queue_name'], 'missedmessage_emails')
email_event = queue_messages[1]['event']
self.assertEqual(email_event['user_profile_id'], cordelia.id)
self.assertEqual(email_event['trigger'], 'mentioned')
def test_second_mention_is_ignored(self) -> None:
message_id = self._login_and_send_original_stream_message(
content='hello @**Cordelia Lear**'
)
self._get_queued_data_for_message_update(
message_id=message_id,
content='re-mention @**Cordelia Lear**',
expect_short_circuit=True,
)
def _turn_on_stream_push_for_cordelia(self) -> None:
'''
conventions:
Cordelia is the message receiver we care about.
Scotland is our stream.
'''
cordelia = self.example_user('cordelia')
stream = self.subscribe(cordelia, 'Scotland')
recipient = get_stream_recipient(stream.id)
cordelia_subscription = Subscription.objects.get(
user_profile_id=cordelia.id,
recipient=recipient,
)
cordelia_subscription.push_notifications = True
cordelia_subscription.save()
def test_updates_with_stream_push_notify(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
# Even though Cordelia configured this stream for pushes,
        # we short-circuit the logic, assuming the original message
# also did a push.
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def _cordelia_connected_to_zulip(self) -> Any:
'''
Right now the easiest way to make Cordelia look
connected to Zulip is to mock the function below.
This is a bit blunt, as it affects other users too,
but we only really look at Cordelia's data, anyway.
'''
return mock.patch(
'zerver.tornado.event_queue.receiver_is_off_zulip',
return_value=False
)
def test_stream_push_notify_for_sorta_present_user(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
# Simulate Cordelia still has an actively polling client, but
# the lack of presence info should still mark her as offline.
#
# Despite Cordelia being offline, we still short circuit
        # offline notifications due to her stream push setting.
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def _make_cordelia_present_on_web(self) -> None:
cordelia = self.example_user('cordelia')
UserPresence.objects.create(
user_profile_id=cordelia.id,
status=UserPresence.ACTIVE,
client=get_client('web'),
timestamp=timezone_now(),
)
def test_stream_push_notify_for_fully_present_user(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def test_always_push_notify_for_fully_present_mentioned_user(self) -> None:
cordelia = self.example_user('cordelia')
cordelia.enable_online_push_notifications = True
cordelia.save()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='newly mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
wildcard_mention_notify=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=True,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
self.assertEqual(len(queue_messages), 1)
def test_always_push_notify_for_fully_present_boring_user(self) -> None:
cordelia = self.example_user('cordelia')
cordelia.enable_online_push_notifications = True
cordelia.save()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=False,
wildcard_mention_notify=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=True,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
# Even though Cordelia has enable_online_push_notifications set
# to True, we don't send her any offline notifications, since she
# was not mentioned.
self.assertEqual(len(queue_messages), 0)
def test_updates_with_stream_mention_of_sorta_present_user(self) -> None:
cordelia = self.example_user('cordelia')
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
        # We will simulate that the user still has an active client,
# but they don't have UserPresence rows, so we will still
# send offline notifications.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
wildcard_mention_notify=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=True,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
# She will get messages enqueued. (Other tests drill down on the
# actual content of these messages.)
self.assertEqual(len(info['queue_messages']), 2)
def test_updates_with_wildcard_mention(self) -> None:
cordelia = self.example_user('cordelia')
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
        # We will simulate that the user still has an active client,
# but they don't have UserPresence rows, so we will still
# send offline notifications.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**all**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=False,
wildcard_mention_notify=True,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=True,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
# She will get messages enqueued.
self.assertEqual(len(info['queue_messages']), 2)
def test_updates_with_upgrade_wildcard_mention(self) -> None:
message_id = self._login_and_send_original_stream_message(
content='Mention @**all**'
)
# If there was a previous wildcard mention delivered to the
# user (because wildcard_mention_notify=True), we don't notify
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
expect_short_circuit=True,
)
def test_updates_with_upgrade_wildcard_mention_disabled(self) -> None:
# If the user has disabled notifications for wildcard
# mentions, they won't have been notified at first, which
# means they should be notified when the message is edited to
# contain a wildcard mention.
#
# This is a bug that we're not equipped to fix right now.
cordelia = self.example_user('cordelia')
cordelia.wildcard_mentions_notify = False
cordelia.save()
message_id = self._login_and_send_original_stream_message(
content='Mention @**all**'
)
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
expect_short_circuit=True,
)
def test_updates_with_stream_mention_of_fully_present_user(self) -> None:
cordelia = self.example_user('cordelia')
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
wildcard_mention_notify=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
# Because Cordelia is FULLY present, we don't need to send any offline
# push notifications or missed message emails.
self.assertEqual(len(info['queue_messages']), 0)
| 34.914397
| 96
| 0.627549
|
3f83426018116f6ddbe53e4f8528b29b81081dab
| 203
|
py
|
Python
|
IVTp/2014/SOBOLEV_M_V/task_5_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTp/2014/SOBOLEV_M_V/task_5_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
IVTp/2014/SOBOLEV_M_V/task_5_24.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
import random
print("Программа случайным образом отображает название одной из шахматных фигур \n\n"+random.choice(['пешка','слон','конь', 'ладья', 'ферзь','король']))
input ("Нажмите Enter для выхода.")
| 50.75
| 152
| 0.738916
|
d746a2cf56f6d60212e224e7557f4af3f39f0206
| 12,324
|
py
|
Python
|
src/tests/biblerAPI_test.py
|
badbarde/bibler-server
|
b9a8faf21127e0d2678f4411ce16760b4fe4602f
|
[
"MIT"
] | null | null | null |
src/tests/biblerAPI_test.py
|
badbarde/bibler-server
|
b9a8faf21127e0d2678f4411ce16760b4fe4602f
|
[
"MIT"
] | null | null | null |
src/tests/biblerAPI_test.py
|
badbarde/bibler-server
|
b9a8faf21127e0d2678f4411ce16760b4fe4602f
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
from datetime import datetime
import bibler.biblerAPI as biblerAPI
import pandas as pd
import pytest
from bibler.biblerAPI import Session, bibler
from bibler.dataclasses.BorrowingUser import BorrowingUser
from dateutil.relativedelta import relativedelta
from fastapi.testclient import TestClient
from requests.sessions import session
from starlette import responses
@pytest.fixture
def uut():
"""Unit under test"""
return TestClient(bibler)
def create_book_test_data(session):
data = [
{
"key": 0,
"title": "Sabriel",
"author": "Garth Nix",
"publisher": "Carlsen",
"number": 1,
"shorthand": "Car",
"category": "Fantasy",
"isbn": "3-551-58128-2",
}
]
def create_books_test_data(session):
data = [
{
"key": 0,
"title": "Sabriel",
"author": "Garth Nix",
"publisher": "Carlsen",
"number": 1,
"shorthand": "Car",
"category": "Fantasy",
"isbn": "3-551-58128-2",
},
{
"key": 1,
"title": "Die granulare Gesellschaft",
"author": "Christoph Kucklick",
"publisher": "Ullstein",
"number": 2,
"shorthand": "Ull",
"category": "Sachbuch",
"isbn": "978-3-548-37625-7",
}
]
def create_user_test_data(session):
data = [
{
"key": 0,
"firstname": "Lukas",
"lastname": "Schmidt",
"classname": "5c"
}
]
def create_users_test_data(session):
data = [
{
"key": 0,
"firstname": "Lukas",
"lastname": "Schmidt",
"classname": "5c"
},
{
"key": 1,
"firstname": "Alice",
"lastname": "Schmidt",
"classname": "lehrer*in"
}
]
def test_get_user(uut: TestClient, tmpdir, caplog):
"""test getting a user if only one exists"""
# given
caplog.set_level(logging.INFO)
session = Session()
create_user_test_data(session)
session.commit()
# when
users = uut.get("/users")
# then
assert users.status_code == 200
assert users.json() == [
{
"key": 0,
"firstname": "Lukas",
"lastname": "Schmidt",
"classname": "5c"
}
]
def test_get_users(uut: TestClient, tmpdir, caplog):
"""test getting users"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_users_test_data(tmpdir)
# when
users = uut.get("/users")
# then
assert users.status_code == 200
assert users.json() == [
{
"key": 0,
"firstname": "Lukas",
"lastname": "Schmidt",
"classname": "5c"
},
{
"key": 1,
"firstname": "Alice",
"lastname": "Schmidt",
"classname": "lehrer*in"
}
]
def test_put_user(uut: TestClient, tmpdir, caplog):
"""test inserting a user"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
# when
users = uut.put("/user", json={
"key": 1,
"firstname": "Alice",
"lastname": "Schmidt",
"classname": "lehrer*in"
})
# then
assert users.json() == {"status": "user created"}
def test_put_user_twice(uut: TestClient, tmpdir, caplog):
"""test inserting a user that is the same as another user
NOTE: this is INTENTIONALLY allowed"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
# when
users = uut.put("/user", json={
"key": 0,
"firstname": "Lukas",
"lastname": "Schmidt",
"classname": "5c"
})
# then
assert users.json() == {"status": "user created"}
def test_get_book(uut: TestClient, tmpdir, caplog):
"""test getting a book"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_book_test_data(tmpdir)
# when
users = uut.get("/books")
# then
assert users.status_code == 200
assert users.json() == [
{
"key": 0,
"title": "Sabriel",
"author": "Garth Nix",
"publisher": "Carlsen",
"number": 1,
"shorthand": "Car",
"category": "Fantasy",
"isbn": "3-551-58128-2",
}
]
def test_get_books(uut: TestClient, tmpdir, caplog):
"""test getting books if there is more than one"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_books_test_data(tmpdir)
# when
users = uut.get("/books")
# then
assert users.status_code == 200
assert users.json() == [
{
"key": 0,
"title": "Sabriel",
"author": "Garth Nix",
"publisher": "Carlsen",
"number": 1,
"shorthand": "Car",
"category": "Fantasy",
"isbn": "3-551-58128-2",
},
{
"key": 1,
"title": "Die granulare Gesellschaft",
"author": "Christoph Kucklick",
"publisher": "Ullstein",
"number": 2,
"shorthand": "Ull",
"category": "Sachbuch",
"isbn": "978-3-548-37625-7",
}
]
def test_put_book(uut: TestClient, tmpdir, caplog):
"""test inserting a book"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_book_test_data(tmpdir)
# when
users = uut.put("/book", json={
"key": 1,
"title": "Die granulare Gesellschaft",
"author": "Christoph Kucklick",
"publisher": "Ullstein",
"number": 2,
"shorthand": "Ull",
"category": "Sachbuch",
"isbn": "978-3-548-37625-7",
})
# then
assert users.json() == {"status": "book created"}
def test_put_user_with_existing_key(uut: TestClient, tmpdir, caplog):
    """test that adding a user with an existing key instead generates a
    new key for the added user"""
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
uut.put("/user", json={
"key": 0,
"firstname": "Kira",
"lastname": "Kylar",
"class": "13a"
})
res = uut.get("/users")
df = pd.DataFrame.from_records(res.json())
assert df[df.firstname == "Kira"].key.values[0] == 1
def test_put_book_with_existing_key(uut: TestClient, tmpdir, caplog):
"""test that adding a book with an existing key instead generates a
new key for the added book"""
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_book_test_data(tmpdir)
uut.put("/book", json={
"key": 0,
"title": "Axiom's End",
"author": "Lindsay Ellis",
"publisher": "St. Martin's Press",
"number": "1",
"shorthand": "SMP",
"category": "SciFi",
"isbn": " 978-1250256737",
})
res = uut.get("/books")
df = pd.DataFrame.from_records(res.json())
assert df[df.title == "Axiom's End"].key.values[0] == 1
def test_lend_book(uut: TestClient, tmpdir, caplog):
"""test simple book lendin usecase"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
create_book_test_data(tmpdir)
# when
response = uut.patch("/borrow/0/0")
# then
caplog.set_level(logging.INFO)
path = os.path.join(tmpdir, BorrowingUser.__name__ + ".json")
df = pd.read_json(path)
expected_return_date = (
datetime.now() + relativedelta(weeks=3)).strftime("%d.%m.%Y")
# the returned value is the expected returndate
assert response.json() == {
"status": "successfully borrowed",
"return_date": expected_return_date
}
# Expiration date is set 3 weeks from today
assert df[df.user_key == 0].expiration_date.values[0] == expected_return_date
    # the returned expiration date is the same as the one saved
assert df[df.user_key == 0].expiration_date.values[0] == expected_return_date
# The start date saved is set to today
assert df[df.user_key == 0].start_date.values[0] == datetime.now(
).date().strftime("%d.%m.%Y")
def test_lend_book_that_is_already_borrowed(uut: TestClient, tmpdir, caplog):
"""test book lending usecase when book is already borrowed"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
create_book_test_data(tmpdir)
# when
response = uut.patch("/borrow/0/0")
response = uut.patch("/borrow/0/0")
# then
caplog.set_level(logging.INFO)
path = os.path.join(tmpdir, BorrowingUser.__name__ + ".json")
df = pd.read_json(path)
expected_return_date = (
datetime.now() + relativedelta(weeks=3)).strftime("%d.%m.%Y")
# the returned value is the expected returndate
assert response.json() == {
"status": "already borrowed",
"return_date": None
}
def test_return_book(uut: TestClient, tmpdir, caplog):
"""test simple book return usecase"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
create_book_test_data(tmpdir)
response = uut.patch("/borrow/0/0")
# when
response = uut.patch("/return/0/0")
# then
path = os.path.join(tmpdir, BorrowingUser.__name__ + ".json")
df = pd.read_json(path)
expected_date = datetime.now().strftime("%d.%m.%Y")
# returns todays date
assert response.json() == {"status": "successfully returned"}
# return date is inserted into the dataframe
assert df[(df.user_key == 0) & (df.book_key == 0)
].return_date.values[0] == expected_date
def test_return_not_borrowed_book(uut: TestClient, tmpdir, caplog):
"""test if a user tries to return a book that he/she has not borrowed, nothing happens"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
create_book_test_data(tmpdir)
# when
response = uut.patch("/return/0/0")
# then
assert response.json() == {"status": "book not borrowed"}
def test_return_book_as_unknown_user(uut: TestClient, tmpdir, caplog):
"""test that if an unknown user tries to return a book nothing happens"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_book_test_data(tmpdir)
# when
response = uut.patch("/return/0/0")
# then
assert response.json() == {"status": "user unknown"}
def test_borrow_same_book_two_times_with_returning_it(uut: TestClient, tmpdir, caplog):
"""test if a user can borrow, return and then borrow the same book twice"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_user_test_data(tmpdir)
create_book_test_data(tmpdir)
expected_return_date = (
datetime.now() + relativedelta(weeks=3)).strftime("%d.%m.%Y")
path = os.path.join(tmpdir, BorrowingUser.__name__ + ".json")
# when
response = uut.patch("/borrow/0/0")
response = uut.patch("/return/0/0")
response = uut.patch("/borrow/0/0")
# then
df = pd.read_json(path)
assert response.json() == {
"status": "successfully borrowed",
"return_date": expected_return_date
}
assert len(df[df.user_key == 0].key.unique()) == 2
assert response.json() == {
"status": "successfully borrowed",
"return_date": expected_return_date
}
def test_return_book_as_wrong_user(uut: TestClient, tmpdir, caplog):
"""test that you cant return a book as another person than that borrowed the book"""
# given
caplog.set_level(logging.INFO)
biblerAPI.save_path = tmpdir
create_users_test_data(tmpdir)
create_book_test_data(tmpdir)
# when
response = uut.patch("/borrow/0/0")
response = uut.patch("/return/1/0")
# then
assert response.json() == {
"status": "book not borrowed",
}
| 28.461894
| 93
| 0.58739
|
6764b6347f0a78681fb6ecb4247b0c8499b3b305
| 398
|
py
|
Python
|
python/perspective/perspective/table/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/perspective/table/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
python/perspective/perspective/table/__init__.py
|
sebinsua/perspective
|
2c19c5fa0046597e30ec780ae08655767c5253d4
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from .table import Table
from .manager import PerspectiveManager
__all__ = ["Table", "PerspectiveManager"]
| 33.166667
| 79
| 0.605528
|
58ceeccf2986eaf160b3038ba95411836add39cd
| 2,168
|
py
|
Python
|
MultiUART.py
|
tonbut/python-multiuart
|
63e45ee59f7e5e4542689a05104bb2445369e4c5
|
[
"MIT"
] | 10
|
2018-07-21T19:18:07.000Z
|
2022-02-04T19:56:01.000Z
|
MultiUART.py
|
tonbut/python-multiuart
|
63e45ee59f7e5e4542689a05104bb2445369e4c5
|
[
"MIT"
] | null | null | null |
MultiUART.py
|
tonbut/python-multiuart
|
63e45ee59f7e5e4542689a05104bb2445369e4c5
|
[
"MIT"
] | null | null | null |
import spidev
import time
class MultiUART:
spi=None
uart=None
def __init__(self,UART,SPIDivider):
self.uart=UART
spi = spidev.SpiDev()
spi.open(0,0)
self.spi=spi
spi.lsbfirst=False
        # SPI clock = 250 MHz / divider (e.g. divider 64 -> ~3.9 MHz)
        spi.max_speed_hz = 250000000 // SPIDivider
self.spi.cshigh=False
spi.loop=False
#spi.bits_per_word=8
return
def cleanup(self):
self.spi.close()
return
# Returns the number of received bytes held in queue for the selected channel.
def checkRx(self):
result=0
self.spi.xfer2( [0x10 | self.uart ]);
time.sleep(0.00250)
result=self.spi.readbytes(1)[0]
time.sleep(0.00250)
return result
def checkTx(self):
result=0
self.spi.xfer2( [0x30 | self.uart ]);
time.sleep(0.00250)
result=self.spi.readbytes(1)[0]
time.sleep(0.00250)
return result
def receiveByte(self):
self.spi.xfer2( [0x20 | self.uart ]);
time.sleep(0.001)
self.spi.xfer2( [1]);
time.sleep(0.001)
result=self.spi.xfer2([0xFF])[0]
time.sleep(0.001)
return result
def flushRx(self):
c=self.checkRx()
if c>0:
self.receiveBytes(c)
c=self.checkRx()
if c>0:
self.receiveBytes(c)
def receiveBytes(self, NUMBYTES):
self.spi.xfer2( [0x20 | self.uart ]);
time.sleep(0.001)
self.spi.xfer2( [NUMBYTES]);
result=[]
for i in range(0,NUMBYTES):
time.sleep(0.0005)
v=self.spi.xfer2([0xFF])[0]
result.append(v)
time.sleep(0.001)
return result
def transmitByte(self, DATA):
self.spi.xfer2( [0x40 | self.uart ]);
time.sleep(0.001)
self.spi.xfer2( [1]);
time.sleep(0.001)
result=self.spi.xfer2([DATA])[0]
time.sleep(0.001)
return
def transmitBytes(self, DATA):
self.spi.xfer2( [0x40 | self.uart ]);
time.sleep(0.001)
length=len(DATA)
self.spi.xfer2( [length]);
for i in range(0,length):
time.sleep(0.0005)
self.spi.xfer2([DATA[i]])
time.sleep(0.001)
return
# Configures the baud rate of the selected channel.
# Baud : 0=1200, 1=2400, 2=4800, 3=9600, 4=19200, 5=38400, 6=57600, 7=115200
def setBaud(self, BAUD):
self.spi.xfer2( [0x80 | self.uart ]);
time.sleep(0.00250)
result=self.spi.xfer2([ BAUD ]);
time.sleep(0.1)
return
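# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical example of driving one UART channel with this class. The
# channel number, SPI clock divider and baud code (3 = 9600, per the table
# above) are made-up values for illustration; the function is never called.
def _example_usage():
    uart = MultiUART(0, 64)            # channel 0, SPI clock divider 64
    uart.setBaud(3)                    # 9600 baud
    uart.transmitBytes([0x48, 0x69])   # queue "Hi" for transmission
    pending = uart.checkRx()
    if pending > 0:
        print(uart.receiveBytes(pending))
    uart.cleanup()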
| 21.465347
| 80
| 0.659133
|
1f3380c942896c7571bee26f2a41900910f13933
| 304
|
py
|
Python
|
ninja/__init__.py
|
duplxey/django-ninja
|
7e0aed29a401d26942a6c95f6a1559dd3ed83aea
|
[
"MIT"
] | 1
|
2021-07-10T02:23:15.000Z
|
2021-07-10T02:23:15.000Z
|
ninja/__init__.py
|
duplxey/django-ninja
|
7e0aed29a401d26942a6c95f6a1559dd3ed83aea
|
[
"MIT"
] | null | null | null |
ninja/__init__.py
|
duplxey/django-ninja
|
7e0aed29a401d26942a6c95f6a1559dd3ed83aea
|
[
"MIT"
] | null | null | null |
"""Django Ninja - Fast Django REST framework"""
__version__ = "0.10.1"
from ninja.main import NinjaAPI
from ninja.params import Query, Path, Header, Cookie, Body, Form, File
from ninja.router import Router
from ninja.schema import Schema
from ninja.files import UploadedFile
from pydantic import Field
| 27.636364
| 70
| 0.786184
|
c7ec295458debb2a2925457a1b8a07c86ca6877e
| 1,200
|
py
|
Python
|
scripts/type_extractor/type_extractor/utils.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 4,816
|
2017-12-12T18:07:09.000Z
|
2019-04-17T02:01:04.000Z
|
scripts/type_extractor/type_extractor/utils.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 514
|
2017-12-12T18:22:52.000Z
|
2019-04-16T16:07:11.000Z
|
scripts/type_extractor/type_extractor/utils.py
|
mehrdad-shokri/retdec
|
a82f16e97b163afe789876e0a819489c5b9b358e
|
[
"MIT",
"Zlib",
"BSD-3-Clause"
] | 579
|
2017-12-12T18:38:02.000Z
|
2019-04-11T13:32:53.000Z
|
"""Utilities."""
import logging
import os
def get_files_with_suffix_from_all_paths(paths, suffix=''):
"""For all paths returns path if it's file.
Otherwise recursively walks path and returns all files with given suffix.
"""
for path in paths:
for f in get_files_with_suffix_from_path(path, suffix):
yield f
def get_files_with_suffix_from_path(path, suffix=''):
"""Returns path if it's file. Otherwise recursively walks path and returns all
files with given suffix.
"""
if os.path.isfile(path) and path.endswith(suffix):
yield path
else:
for dir_path, _, file_list in os.walk(path):
for fname in sorted(file_list):
if fname.endswith(suffix):
yield os.path.join(dir_path, fname)
def setup_logging(enable):
"""Sets up the logging facilities."""
if enable:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s')
else:
logging.disable(logging.CRITICAL)
def object_attr_string_repr(attr):
"""Returns string representation of attr."""
return str(attr) if attr is not None else ''
| 29.268293
| 82
| 0.645833
|
f8431f9b94f30129fe3782b70aecabc5d806f312
| 1,550
|
py
|
Python
|
Tuplas.py
|
vnnstar/Python-Mundo3-CursoEmVideo
|
cfb51a39f0240857469473a1f21970d3fb4b6076
|
[
"MIT"
] | null | null | null |
Tuplas.py
|
vnnstar/Python-Mundo3-CursoEmVideo
|
cfb51a39f0240857469473a1f21970d3fb4b6076
|
[
"MIT"
] | null | null | null |
Tuplas.py
|
vnnstar/Python-Mundo3-CursoEmVideo
|
cfb51a39f0240857469473a1f21970d3fb4b6076
|
[
"MIT"
] | null | null | null |
lanche = 'Hambúrguer', 'Suco', 'Pizza', 'Pudim'
# tuples are written between parentheses: ('Hambúrguer', 'Suco', 'Pizza',
# 'Pudim'), but Python recognizes a tuple even without the parentheses
print(lanche)
print(lanche[1])  # shows the value at position 1
print(lanche[:2])  # goes from the start up to index 1, since the last index is excluded
print(lanche[1:3])  # starts at position 1 and goes up to index 2, excluding 3
print(len(lanche))
for contador in range(0, len(lanche)):
print(f"Vou comer {lanche[contador]} que está na posição "
f"{contador} ")
print('-' * 60)
for pos, comida in enumerate(lanche):
print(f'Vou comer {comida} que está na posição {pos}')
print('-' * 60)
for comida in lanche:
print(f'Vou comer {comida}')
print('-' * 60)
print(sorted(lanche))  # sorted copy of the tuple; it does not change it, since tuples are immutable
print('-' * 60)
a = (2, 5, 4)
b = (5, 8, 2)
c = a + b
print(c)
print('-' * 60)
print(c.count(5))  # counts how many times the specified value appears
print('-' * 60)
print(c.index(8))  # index tells at which position the specified value is
print('-' * 60)
print(c.index(2))  # and it only finds the first occurrence
print('-' * 60)
# it is possible to choose where index starts searching, as follows >>
print(c.index(2, 3))
# this way index starts searching from element 3 instead of 0
print('-' * 60)
pessoa = ('Vinicius', 27)
print(pessoa)
print('-' * 60)
del(pessoa)  # it is possible to delete a tuple variable
# print(pessoa) would now report pessoa as undefined, i.e. it was deleted
print('-' * 60)
| 31
| 78
| 0.674194
|
ccf2ddfa5a74a8fb3df5ec92f991a5d23b65a8de
| 11,223
|
py
|
Python
|
src/pretalx/orga/views/review.py
|
jjasghar/pretalx
|
c5041ec4001c5ef66cdb48e718789a086de280a2
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/views/review.py
|
jjasghar/pretalx
|
c5041ec4001c5ef66cdb48e718789a086de280a2
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/orga/views/review.py
|
jjasghar/pretalx
|
c5041ec4001c5ef66cdb48e718789a086de280a2
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import messages
from django.db import transaction
from django.db.models import Count, Exists, OuterRef, Q
from django.shortcuts import get_object_or_404, redirect
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView, TemplateView
from django_context_decorator import context
from pretalx.common.mixins.views import (
EventPermissionRequired, Filterable, PermissionRequired,
)
from pretalx.common.phrases import phrases
from pretalx.common.views import CreateOrUpdateView
from pretalx.orga.forms import ReviewForm
from pretalx.submission.forms import QuestionsForm, SubmissionFilterForm
from pretalx.submission.models import Review, Submission, SubmissionStates
class ReviewDashboard(EventPermissionRequired, Filterable, ListView):
template_name = 'orga/review/dashboard.html'
paginate_by = None
context_object_name = 'submissions'
permission_required = 'orga.view_review_dashboard'
default_filters = (
'code__icontains',
'speakers__name__icontains',
'title__icontains',
)
filter_fields = ('submission_type', 'state', 'track')
def get_filter_form(self):
return SubmissionFilterForm(
data=self.request.GET,
event=self.request.event,
usable_states=[
SubmissionStates.SUBMITTED,
SubmissionStates.ACCEPTED,
SubmissionStates.REJECTED,
SubmissionStates.CONFIRMED,
],
)
def get_queryset(self):
queryset = self.request.event.submissions.filter(
state__in=[
SubmissionStates.SUBMITTED,
SubmissionStates.ACCEPTED,
SubmissionStates.REJECTED,
SubmissionStates.CONFIRMED,
]
)
limit_tracks = self.request.user.teams.filter(
Q(all_events=True)
| Q(
Q(all_events=False)
& Q(limit_events__in=[self.request.event])
),
limit_tracks__isnull=False,
)
if limit_tracks:
tracks = set()
for team in limit_tracks:
tracks.update(team.limit_tracks.filter(event=self.request.event))
queryset = queryset.filter(track__in=tracks)
queryset = self.filter_queryset(queryset).annotate(review_count=Count('reviews'))
can_see_all_reviews = self.request.user.has_perm('orga.view_all_reviews', self.request.event)
overridden_reviews = Review.objects.filter(
override_vote__isnull=False, submission_id=OuterRef('pk')
)
if not can_see_all_reviews:
overridden_reviews = overridden_reviews.filter(user=self.request.user)
queryset = (
queryset.annotate(has_override=Exists(overridden_reviews))
.select_related('track', 'submission_type')
.prefetch_related('speakers', 'reviews', 'reviews__user')
)
for submission in queryset:
if can_see_all_reviews:
submission.current_score = submission.median_score
else:
reviews = [review for review in submission.reviews.all() if review.user == self.request.user]
submission.current_score = None
if reviews:
submission.current_score = reviews[0].score
return self.sort_queryset(queryset)
def sort_queryset(self, queryset):
order_prevalence = {
'default': ('state', 'current_score', 'code'),
'score': ('current_score', 'state', 'code'),
'count': ('review_count', 'code')
}
ordering = self.request.GET.get('sort', 'default')
reverse = True
if ordering.startswith('-'):
reverse = False
ordering = ordering[1:]
order = order_prevalence.get(ordering, order_prevalence['default'])
def get_order_tuple(obj):
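            # substitute a sentinel for missing scores so None never reaches the tuple comparison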
return tuple(
getattr(obj, key)
if not (key == 'current_score' and not obj.current_score)
else 100 * -int(reverse)
for key in order
)
return sorted(
queryset,
key=get_order_tuple,
reverse=reverse,
)
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
missing_reviews = Review.find_missing_reviews(
self.request.event, self.request.user
)
result['missing_reviews'] = missing_reviews
result['next_submission'] = missing_reviews.first()
return result
@transaction.atomic
def post(self, request, *args, **kwargs):
total = {'accept': 0, 'reject': 0, 'error': 0}
for key, value in request.POST.items():
if not key.startswith('s-') or value not in ['accept', 'reject']:
continue
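            # POST keys look like "s-<pk>"; stripping the prefix characters leaves the submission pk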
pk = key.strip('s-')
try:
submission = request.event.submissions.filter(state='submitted').get(pk=pk)
except Submission.DoesNotExist:
total['error'] += 1
continue
if not request.user.has_perm('submission.' + value + '_submission', submission):
total['error'] += 1
continue
getattr(submission, value)(person=request.user)
total[value] += 1
if not total['accept'] and not total['reject'] and not total['error']:
messages.success(request, _('There was nothing to do.'))
elif total['accept'] or total['reject']:
msg = str(_('Success! {accepted} submissions were accepted, {rejected} submissions were rejected.')).format(accepted=total['accept'], rejected=total['reject'])
if total['error']:
msg += ' ' + str(_('We were unable to change the state of {count} submissions.')).format(count=total['error'])
messages.success(request, msg)
else:
messages.error(request, str(_('We were unable to change the state of all {count} submissions.')).format(count=total['error']))
return super().get(request, *args, **kwargs)
class ReviewSubmission(PermissionRequired, CreateOrUpdateView):
form_class = ReviewForm
model = Review
template_name = 'orga/submission/review.html'
permission_required = 'submission.view_reviews'
write_permission_required = 'submission.review_submission'
@context
@cached_property
def submission(self):
return get_object_or_404(
self.request.event.submissions, code__iexact=self.kwargs['code']
)
@cached_property
def object(self):
return (
self.submission.reviews.exclude(user__in=self.submission.speakers.all())
.filter(user=self.request.user)
.first()
)
def get_object(self):
return self.object
def get_permission_object(self):
return self.submission
@context
@cached_property
def read_only(self):
return not self.request.user.has_perm(
'submission.review_submission', self.get_object() or self.submission
)
@context
def profiles(self):
return [
speaker.event_profile(self.request.event)
for speaker in self.submission.speakers.all()
]
@context
def reviews(self):
return [
{
'score': review.display_score,
'text': review.text,
'user': review.user.get_display_name(),
'answers': [
review.answers.filter(question=question).first()
for question in self.qform.queryset
],
}
for review in self.submission.reviews.exclude(
pk=(self.object.pk if self.object else None)
)
]
@context
@cached_property
def qform(self):
return QuestionsForm(
target='reviewer',
event=self.request.event,
data=(self.request.POST if self.request.method == 'POST' else None),
files=(self.request.FILES if self.request.method == 'POST' else None),
speaker=self.request.user,
review=self.object,
readonly=self.read_only,
)
@context
def skip_for_now(self):
return Review.find_missing_reviews(
self.request.event, self.request.user, ignore=[self.submission]
).first()
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
result['done'] = self.request.user.reviews.filter(submission__event=self.request.event).count()
result['total_reviews'] = Review.find_missing_reviews(
self.request.event, self.request.user
).count() + result['done']
if result['total_reviews']:
result['percentage'] = int(result['done'] * 100 / result['total_reviews'])
return result
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['event'] = self.request.event
kwargs['user'] = self.request.user
kwargs['read_only'] = self.read_only
return kwargs
def form_valid(self, form):
if not self.qform.is_valid():
messages.error(self.request, _('There have been errors with your input.'))
return redirect(self.get_success_url())
form.instance.submission = self.submission
form.instance.user = self.request.user
if not form.instance.pk:
if not self.request.user.has_perm(
'submission.review_submission', self.submission
):
messages.error(
self.request, _('You cannot review this submission at this time.')
)
return redirect(self.get_success_url())
if form.instance.pk and not self.request.user.has_perm(
'submission.edit_review', form.instance
):
messages.error(
self.request, _('You cannot review this submission at this time.')
)
return redirect(self.get_success_url())
form.save()
self.qform.review = form.instance
self.qform.save()
return super().form_valid(form)
def get_success_url(self) -> str:
if self.request.POST.get('show_next', '0').strip() == '1':
next_submission = Review.find_missing_reviews(
self.request.event, self.request.user
).first()
if next_submission:
messages.success(self.request, phrases.orga.another_review)
return next_submission.orga_urls.reviews
messages.success(
self.request, _('Nice, you have no submissions left to review!')
)
return self.request.event.orga_urls.reviews
return self.submission.orga_urls.reviews
class ReviewSubmissionDelete(EventPermissionRequired, TemplateView):
template_name = 'orga/review/submission_delete.html'
permission_required = 'orga.remove_review'
| 37.661074
| 171
| 0.60973
|
fe330300a671b90f899e8970ed42c6a51dec40c1
| 11,486
|
py
|
Python
|
pl_bolts/models/self_supervised/amdim/networks.py
|
jfrancis71/pytorch-lightning-bolts
|
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/models/self_supervised/amdim/networks.py
|
jfrancis71/pytorch-lightning-bolts
|
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/models/self_supervised/amdim/networks.py
|
jfrancis71/pytorch-lightning-bolts
|
8a4cf8f61644c28d6df54ccffe3a52d6f5fce5a6
|
[
"Apache-2.0"
] | null | null | null |
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
class AMDIMEncoder(nn.Module):
def __init__(self, dummy_batch, num_channels=3, encoder_feature_dim=64, embedding_fx_dim=512,
conv_block_depth=3, encoder_size=32, use_bn=False):
super().__init__()
# NDF = encoder hidden feat size
# RKHS = output dim
n_depth = conv_block_depth
ndf = encoder_feature_dim
self.ndf = encoder_feature_dim
n_rkhs = embedding_fx_dim
self.n_rkhs = embedding_fx_dim
self.use_bn = use_bn
self.dim2layer = None
self.encoder_size = encoder_size
# encoding block for local features
print(f'Using a {encoder_size}x{encoder_size} encoder')
if encoder_size == 32:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResNxN(ndf, ndf, 1, 1, 0, use_bn),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 4, True, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 4, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 4, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 64:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
elif encoder_size == 128:
self.layer_list = nn.ModuleList([
Conv3x3(num_channels, ndf, 5, 2, 2, False, pad_mode='reflect'),
Conv3x3(ndf, ndf, 3, 1, 0, False),
ConvResBlock(ndf * 1, ndf * 2, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 2, ndf * 4, 4, 2, 0, n_depth, use_bn),
ConvResBlock(ndf * 4, ndf * 8, 2, 2, 0, n_depth, use_bn),
MaybeBatchNorm2d(ndf * 8, True, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResBlock(ndf * 8, ndf * 8, 3, 1, 0, n_depth, use_bn),
ConvResNxN(ndf * 8, n_rkhs, 3, 1, 0, use_bn),
MaybeBatchNorm2d(n_rkhs, True, True)
])
else:
raise RuntimeError(f"Could not build encoder. Encoder size {encoder_size} is not supported")
self._config_modules(
dummy_batch,
output_widths=[1, 5, 7],
n_rkhs=n_rkhs,
use_bn=use_bn
)
def init_weights(self, init_scale=1.):
"""
Run custom weight init for modules...
"""
for layer in self.layer_list:
if isinstance(layer, (ConvResNxN, ConvResBlock)):
layer.init_weights(init_scale)
for layer in self.modules():
if isinstance(layer, (ConvResNxN, ConvResBlock)):
layer.init_weights(init_scale)
if isinstance(layer, FakeRKHSConvNet):
layer.init_weights(init_scale)
def _config_modules(self, x, output_widths, n_rkhs, use_bn):
"""
Configure the modules for extracting fake rkhs embeddings for infomax.
"""
# get activations from each block to see output dims
enc_acts = self._forward_acts(x)
# out dimension to layer index
# dim = number of output feature vectors
self.dim2layer = {}
# pull out layer indexes for the requested output_widths
for layer_i, conv_out in enumerate(enc_acts):
for output_width in output_widths:
b, c, w, h = conv_out.size()
if w == output_width:
self.dim2layer[w] = layer_i
# get projected activation sizes at different layers
# ndf_1 = enc_acts[self.dim2layer[1]].size(1)
ndf_5 = enc_acts[self.dim2layer[5]].size(1)
ndf_7 = enc_acts[self.dim2layer[7]].size(1)
# configure modules for fake rkhs embeddings
self.rkhs_block_5 = FakeRKHSConvNet(ndf_5, n_rkhs, use_bn)
self.rkhs_block_7 = FakeRKHSConvNet(ndf_7, n_rkhs, use_bn)
def _forward_acts(self, x):
"""
Return activations from all layers.
"""
# run forward pass through all layers
layer_acts = [x]
for _, layer in enumerate(self.layer_list):
layer_in = layer_acts[-1]
layer_out = layer(layer_in)
layer_acts.append(layer_out)
# remove input from the returned list of activations
return_acts = layer_acts[1:]
return return_acts
def forward(self, x):
# compute activations in all layers for x
activations = self._forward_acts(x)
# gather rkhs embeddings from certain layers
# last feature map with (b, d, 1, 1) (ie: last network out)
r1 = activations[self.dim2layer[1]]
# last feature map with (b, d, 5, 5)
r5 = activations[self.dim2layer[5]]
r5 = self.rkhs_block_5(r5)
# last feature map with (b, d, 7, 7)
r7 = activations[self.dim2layer[7]]
r7 = self.rkhs_block_7(r7)
return r1, r5, r7
class Conv3x3(nn.Module):
def __init__(self, n_in, n_out, n_kern, n_stride, n_pad,
use_bn=True, pad_mode='constant'):
super(Conv3x3, self).__init__()
assert (pad_mode in ['constant', 'reflect'])
self.n_pad = (n_pad, n_pad, n_pad, n_pad)
self.pad_mode = pad_mode
self.conv = nn.Conv2d(n_in, n_out, n_kern, n_stride, 0,
bias=(not use_bn))
self.relu = nn.ReLU(inplace=True)
self.bn = MaybeBatchNorm2d(n_out, True, use_bn)
def forward(self, x):
if self.n_pad[0] > 0:
# maybe pad the input
x = F.pad(x, self.n_pad, mode=self.pad_mode)
# always apply conv
x = self.conv(x)
# maybe apply batchnorm
x = self.bn(x)
# always apply relu
out = self.relu(x)
return out
class ConvResBlock(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, depth, use_bn):
super(ConvResBlock, self).__init__()
layer_list = [ConvResNxN(n_in, n_out, width, stride, pad, use_bn)]
for i in range(depth - 1):
layer_list.append(ConvResNxN(n_out, n_out, 1, 1, 0, use_bn))
self.layer_list = nn.Sequential(*layer_list)
def init_weights(self, init_scale=1.):
"""
Do a fixup-ish init for each ConvResNxN in this block.
"""
for m in self.layer_list:
m.init_weights(init_scale)
def forward(self, x):
# run forward pass through the list of ConvResNxN layers
x_out = self.layer_list(x)
return x_out
class ConvResNxN(nn.Module):
def __init__(self, n_in, n_out, width, stride, pad, use_bn=False):
super(ConvResNxN, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.width = width
self.stride = stride
self.pad = pad
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(n_in, n_out, width, stride, pad, bias=False)
self.conv2 = nn.Conv2d(n_out, n_out, 1, 1, 0, bias=False)
self.n_grow = n_out - n_in
if self.n_grow < 0:
# use self.conv3 to downsample feature dim
self.conv3 = nn.Conv2d(n_in, n_out, width, stride, pad, bias=True)
else:
# self.conv3 is not used when n_out >= n_in
self.conv3 = None
self.bn1 = MaybeBatchNorm2d(n_out, True, use_bn)
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
def forward(self, x):
h1 = self.bn1(self.conv1(x))
h2 = self.conv2(self.relu2(h1))
if self.n_out < self.n_in:
h3 = self.conv3(x)
elif self.n_in == self.n_out:
h3 = F.avg_pool2d(x, self.width, self.stride, self.pad)
else:
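            # n_out > n_in: average-pool spatially, then zero-pad the extra output channels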
h3_pool = F.avg_pool2d(x, self.width, self.stride, self.pad)
h3 = F.pad(h3_pool, (0, 0, 0, 0, 0, self.n_grow))
h23 = h2 + h3
return h23
class MaybeBatchNorm2d(nn.Module):
def __init__(self, n_ftr, affine, use_bn):
super(MaybeBatchNorm2d, self).__init__()
self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
self.use_bn = use_bn
def forward(self, x):
if self.use_bn:
x = self.bn(x)
return x
class NopNet(nn.Module):
def __init__(self, norm_dim=None):
super(NopNet, self).__init__()
self.norm_dim = norm_dim
def forward(self, x):
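        # L2-normalize along norm_dim when requested; otherwise pass the input through unchanged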
if self.norm_dim is not None:
x_norms = torch.sum(x ** 2., dim=self.norm_dim, keepdim=True)
x_norms = torch.sqrt(x_norms + 1e-6)
x = x / x_norms
return x
class FakeRKHSConvNet(nn.Module):
def __init__(self, n_input, n_output, use_bn=False):
super(FakeRKHSConvNet, self).__init__()
self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
padding=0, bias=False)
self.bn_out = MaybeBatchNorm2d(n_output, True, True)
self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1,
stride=1, padding=0, bias=True)
# when possible, initialize shortcut to be like identity
if n_output >= n_input:
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
eye_mask[i, i, 0, 0] = 1
self.shortcut.weight.data.uniform_(-0.01, 0.01)
self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.)
def init_weights(self, init_scale=1.):
# initialize first conv in res branch
# -- rescale the default init for nn.Conv2d layers
nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
self.conv1.weight.data.mul_(init_scale)
# initialize second conv in res branch
# -- set to 0, like fixup/zero init
nn.init.constant_(self.conv2.weight, 0.)
def forward(self, x):
h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
h = self.bn_out(h_res + self.shortcut(x))
return h
| 39.068027
| 104
| 0.578705
|
b872ab723e31a0f4bc1a6c1d6483dedf8658cb78
| 2,167
|
py
|
Python
|
python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py
|
slf12/Paddle
|
fa43d74a3a16ac696db5dc893c9a7b1c6913dc85
|
[
"Apache-2.0"
] | 1
|
2020-05-02T00:00:20.000Z
|
2020-05-02T00:00:20.000Z
|
python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py
|
MaJun-cn/Paddle
|
0ec3a42e9740a5f5066053bb49a923d538eba24a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/dygraph/dygraph_to_static/call_transformer.py
|
MaJun-cn/Paddle
|
0ec3a42e9740a5f5066053bb49a923d538eba24a
|
[
"Apache-2.0"
] | 4
|
2020-07-27T13:24:03.000Z
|
2020-08-06T08:20:32.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import is_paddle_api
class CallTransformer(gast.NodeTransformer):
"""
This class transforms function calls into Static Graph Ast.
"""
def __init__(self, wrapper_root):
assert isinstance(
wrapper_root, AstNodeWrapper
), "Input non-AstNodeWrapper node for the initialization of CallTransformer."
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
def _is_builtin_call(self, node):
assert isinstance(node, gast.Call)
func_str = ast_to_source_code(node.func).strip()
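        # evaluate the call target's source text to check whether it resolves to a Python builtin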
try:
from paddle.fluid.dygraph.dygraph_to_static.convert_call_func import is_builtin
return eval("is_builtin({})".format(func_str))
except Exception:
return False
def transform(self):
self.visit(self.root)
def visit_Call(self, node):
self.generic_visit(node)
if is_paddle_api(node):
return node
if self._is_builtin_call(node):
return node
func_str = ast_to_source_code(node.func).strip()
new_func_str = "fluid.dygraph.dygraph_to_static.convert_call({})".format(
func_str)
new_func_ast = gast.parse(new_func_str).body[0].value
node.func = new_func_ast
return node
| 34.951613
| 91
| 0.708814
|
b3304b9e2c1161e4e5489e95f9f2278bde95d7b0
| 1,630
|
py
|
Python
|
run_all.py
|
abhishakvarshney/movie_rating_bot
|
767d78a074b520f42047ddfca35a6591c55f7fc4
|
[
"Apache-2.0"
] | 1
|
2019-11-12T08:19:35.000Z
|
2019-11-12T08:19:35.000Z
|
run_all.py
|
abhishakvarshney/movie_rating_bot
|
767d78a074b520f42047ddfca35a6591c55f7fc4
|
[
"Apache-2.0"
] | null | null | null |
run_all.py
|
abhishakvarshney/movie_rating_bot
|
767d78a074b520f42047ddfca35a6591c55f7fc4
|
[
"Apache-2.0"
] | 2
|
2019-11-15T17:52:44.000Z
|
2019-11-16T09:24:01.000Z
|
import argparse
import logging
import rasa.core.run
from rasa.core.channels.console import CmdlineInput
from rasa.core.agent import Agent
from rasa.core.interpreter import RasaNLUInterpreter
from rasa.core.tracker_store import MongoTrackerStore
from rasa.core.training import interactive
from rasa.core.utils import EndpointConfig
logger = logging.getLogger(__name__)
def run(serve_forever=True):
interpreter = RasaNLUInterpreter("models/nlu")
agent = Agent.load("models/dialogue", interpreter=interpreter)
action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
if serve_forever:
agent.handle_channels([CmdlineInput()])
return agent
def interactive_learning(serve_forever=True):
import logging
from rasa.core import utils, train
logger = logging.getLogger(__name__)
return train(domain_file="domain.yml",
output_path="model/dialogue",
policy_config = "config_nlu.yml",
kwargs={"batch_size": 50,
"epochs": 200,
"max_training_samples": 300
},
training_resource='data/stories.md')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="debug log for development and production"
)
parser.add_argument("-d", "--debug", help="Set the logging level")
args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
run()
# agent = interactive_learning()
# interactive.run_interactive_learning('data/stories.md')
| 29.636364
| 73
| 0.671779
|
da8e7067e9d0bdb821245fc570af6cf1920ab616
| 1,002
|
py
|
Python
|
fastapi_workshop/models/customer.py
|
jbeigbeder/fastapi-workshop
|
852a8373c325f3177f2a3e5d572c8ded6c7be73e
|
[
"MIT"
] | null | null | null |
fastapi_workshop/models/customer.py
|
jbeigbeder/fastapi-workshop
|
852a8373c325f3177f2a3e5d572c8ded6c7be73e
|
[
"MIT"
] | null | null | null |
fastapi_workshop/models/customer.py
|
jbeigbeder/fastapi-workshop
|
852a8373c325f3177f2a3e5d572c8ded6c7be73e
|
[
"MIT"
] | null | null | null |
"""database models: customer and order"""
from sqlalchemy import Column, Integer, String, Boolean, Date, ForeignKey
from sqlalchemy.orm import relationship
from ..database import Base
class Order(Base):
"""Order model"""
__tablename__ = 'order'
id = Column(Integer, primary_key=True, index=True)
date = Column(Date, nullable=False)
total = Column(String(10), nullable=False)
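    # each order references the customer that placed it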
customer_id = Column(Integer, ForeignKey("customer.id"))
class Customer(Base):
"""Customer model"""
__tablename__ = 'customer'
id = Column(name="id", type_=Integer, primary_key=True, index=True)
name = Column(name="name", type_=String(50), nullable=False)
email = Column(name="email",
type_=String(100),
nullable=False,
index=True,
unique=True)
is_active = Column(name='is_active', type_=Boolean, nullable=False)
birthday = Column(name='birthday', type_=Date)
orders = relationship(Order)
| 32.322581
| 73
| 0.651697
|
0a4a2a7e1ea6afd9ddca97513aa1baa8382e22fa
| 13,725
|
py
|
Python
|
cryptoapis/model/get_transaction_details_by_transaction_id_from_callback_ribsbc.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
cryptoapis/model/get_transaction_details_by_transaction_id_from_callback_ribsbc.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
cryptoapis/model/get_transaction_details_by_transaction_id_from_callback_ribsbc.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.get_transaction_details_by_transaction_idribsbc_vin import GetTransactionDetailsByTransactionIDRIBSBCVin
from cryptoapis.model.get_transaction_details_by_transaction_idribsbc_vout import GetTransactionDetailsByTransactionIDRIBSBCVout
globals()['GetTransactionDetailsByTransactionIDRIBSBCVin'] = GetTransactionDetailsByTransactionIDRIBSBCVin
globals()['GetTransactionDetailsByTransactionIDRIBSBCVout'] = GetTransactionDetailsByTransactionIDRIBSBCVout
class GetTransactionDetailsByTransactionIDFromCallbackRIBSBC(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'locktime': (int,), # noqa: E501
'size': (int,), # noqa: E501
'version': (int,), # noqa: E501
'vin': ([GetTransactionDetailsByTransactionIDRIBSBCVin],), # noqa: E501
'vout': ([GetTransactionDetailsByTransactionIDRIBSBCVout],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'locktime': 'locktime', # noqa: E501
'size': 'size', # noqa: E501
'version': 'version', # noqa: E501
'vin': 'vin', # noqa: E501
'vout': 'vout', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, locktime, size, version, vin, vout, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDFromCallbackRIBSBC - a model defined in OpenAPI
Args:
locktime (int): Represents the time at which a particular transaction can be added to the blockchain.
size (int): Represents the total size of this transaction.
version (int): Represents transaction version number.
vin ([GetTransactionDetailsByTransactionIDRIBSBCVin]): Represents the transaction inputs.
vout ([GetTransactionDetailsByTransactionIDRIBSBCVout]): Represents the transaction outputs.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.locktime = locktime
self.size = size
self.version = version
self.vin = vin
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, locktime, size, version, vin, vout, *args, **kwargs): # noqa: E501
"""GetTransactionDetailsByTransactionIDFromCallbackRIBSBC - a model defined in OpenAPI
Args:
locktime (int): Represents the time at which a particular transaction can be added to the blockchain.
size (int): Represents the total size of this transaction.
version (int): Represents transaction version number.
vin ([GetTransactionDetailsByTransactionIDRIBSBCVin]): Represents the transaction inputs.
vout ([GetTransactionDetailsByTransactionIDRIBSBCVout]): Represents the transaction outputs.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.locktime = locktime
self.size = size
self.version = version
self.vin = vin
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 46.525424
| 484
| 0.601311
|
de596b0c3eac2fb63574d187f4b746a78dca812f
| 1,131
|
py
|
Python
|
gravis3d/generator.py
|
rajangarhwal/Gravis3D
|
c1b1a3a712ef6bba82c7cd4c98be4e8ffaac0d29
|
[
"MIT"
] | 1
|
2020-07-30T05:54:08.000Z
|
2020-07-30T05:54:08.000Z
|
gravis3d/generator.py
|
rajangarhwal/Gravis3D
|
c1b1a3a712ef6bba82c7cd4c98be4e8ffaac0d29
|
[
"MIT"
] | null | null | null |
gravis3d/generator.py
|
rajangarhwal/Gravis3D
|
c1b1a3a712ef6bba82c7cd4c98be4e8ffaac0d29
|
[
"MIT"
] | null | null | null |
from body import Body
from numpy.random import normal
from vpython import *
class Nbodies:
"""
A class to implement all bodies.
"""
def __init__(self, N = None):
        N = N or 10  # default to 10 bodies so the sampling calls below never receive None
        self.N = N
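        # draw positions, velocities and radii for all N bodies from normal distributions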
x,y,z = normal(0,1e11,N), normal(0,1e11,N), normal(0,1e11,N)
vx,vy,vz = normal(0,1e7,N), normal(0,1e7,N), normal(0,1e7,N)
r = abs(normal(1e9,1e8,N))
self.bodies = []
for i in range(N):
newbody = Body(radius = r[i], pos = vector(x[i],y[i],z[i]), velocity = vector(vx[i],vy[i],vz[i]))
#newbody = Body()
self.bodies.append(newbody)
def add_particle(self,position):
r = abs(normal(1e9,1e8,1))
vx,vy,vz = normal(0,1e7,1), normal(0,1e7,1), normal(0,1e7,1)
add = Body(radius = r[0], pos = vector(position), velocity = vector(vx[0],vy[0],vz[0]))
        self.bodies.append(add)
        self.N += 1  # keep the body count in sync so update() also includes the new particle
def update(self):
for i in range(self.N):
for j in range(self.N):
if i!=j:
self.bodies[i].attract(self.bodies[j])
#self.bodies[i].move()
| 33.264706
| 109
| 0.519894
|
4360e6477545e7994163aff91f910bbb1a365112
| 1,147
|
py
|
Python
|
world.py
|
ctII/somegame
|
d49aaed0b23abe99deebe9ad80cb23b05dd5a75d
|
[
"MIT"
] | null | null | null |
world.py
|
ctII/somegame
|
d49aaed0b23abe99deebe9ad80cb23b05dd5a75d
|
[
"MIT"
] | null | null | null |
world.py
|
ctII/somegame
|
d49aaed0b23abe99deebe9ad80cb23b05dd5a75d
|
[
"MIT"
] | null | null | null |
from terrainBarrier import terrainBarrier
#entire terrain system needs to be redone, getTerrainAt is currently just bruteforcing
class world:
def __init__(self, width, height):
self.entities = []
#terrible implementation of terrain
self.terrain = []
for i in range(0, width):
self.terrain.append(terrainBarrier(0, i))
self.terrain.append(terrainBarrier(height, i))
for i in range(0, height):
self.terrain.append(terrainBarrier(i, 0))
self.terrain.append(terrainBarrier(i, width))
def getTerrain(self):
return self.terrain
def getTerrainAt(self, posY, posX):
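        # linear scan over every barrier; O(n) per lookup, flagged above for a rewrite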
for terrain in self.terrain:
if terrain.getX() == posX and terrain.getY() == posY:
return terrain
def getEntity(self, UUID):
for entity in self.entities:
if entity.getUUID() == UUID:
return entity
def getEntities(self):
return self.entities
def addEntity(self, entity):
self.entities.append(entity)
def removeEntity(self, entity):
self.entities.remove(entity)
| 29.410256
| 86
| 0.622493
|
ac581006440aa53709f1602d86be8e7c0dd67ab7
| 391
|
py
|
Python
|
vozdocu/wsgi.py
|
Vido/voxdocu
|
912f2e012af5280467fb9d83641f206e9d921145
|
[
"Apache-2.0"
] | 7
|
2020-12-08T17:06:51.000Z
|
2022-01-11T19:35:10.000Z
|
vozdocu/wsgi.py
|
Vido/zecontinha
|
912f2e012af5280467fb9d83641f206e9d921145
|
[
"Apache-2.0"
] | 2
|
2020-06-05T19:38:58.000Z
|
2020-06-13T02:30:31.000Z
|
vozdocu/wsgi.py
|
Vido/voxdocu
|
912f2e012af5280467fb9d83641f206e9d921145
|
[
"Apache-2.0"
] | 6
|
2020-12-08T19:35:10.000Z
|
2021-11-19T19:22:33.000Z
|
"""
WSGI config for vozdocu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vozdocu.settings")
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
3839998dcd8288346ec307cc88e34ed3a7362e6f
| 3,644
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/microgenomatesgroupbacteriumrifcsplowo201full4613.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/microgenomatesgroupbacteriumrifcsplowo201full4613.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/microgenomatesgroupbacteriumrifcsplowo201full4613.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Microgenomates group bacterium RIFCSPLOWO2_01_FULL_46_13.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MicrogenomatesGroupBacteriumRifcsplowo201Full4613(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Microgenomates group bacterium RIFCSPLOWO2_01_FULL_46_13 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Microgenomates group bacterium RIFCSPLOWO2_01_FULL_46_13 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MicrogenomatesGroupBacteriumRifcsplowo201Full4613",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 34.704762
| 223
| 0.691273
|
29f3a39f90d250efacebb9a39a35868843ba4bed
| 6,171
|
py
|
Python
|
nf_core/bump_version.py
|
matrulda/tools
|
b48bce26b7da46cb71e4a8f78b68d9ceac579ca6
|
[
"MIT"
] | 1
|
2019-08-14T16:20:04.000Z
|
2019-08-14T16:20:04.000Z
|
nf_core/bump_version.py
|
matrulda/tools
|
b48bce26b7da46cb71e4a8f78b68d9ceac579ca6
|
[
"MIT"
] | 2
|
2020-02-27T11:17:44.000Z
|
2020-12-09T05:45:14.000Z
|
nf_core/bump_version.py
|
matrulda/tools
|
b48bce26b7da46cb71e4a8f78b68d9ceac579ca6
|
[
"MIT"
] | 1
|
2020-12-07T12:32:00.000Z
|
2020-12-07T12:32:00.000Z
|
#!/usr/bin/env python
"""Bumps the version number in all appropriate files for
a nf-core pipeline.
"""
import logging
import os
import re
import rich.console
import sys
import nf_core.utils
log = logging.getLogger(__name__)
stderr = rich.console.Console(stderr=True, force_terminal=nf_core.utils.rich_force_colors())
def bump_pipeline_version(pipeline_obj, new_version):
"""Bumps a pipeline version number.
Args:
pipeline_obj (nf_core.utils.Pipeline): A `Pipeline` object that holds information
about the pipeline contents and build files.
new_version (str): The new version tag for the pipeline. Semantic versioning only.
"""
# Collect the old and new version numbers
current_version = pipeline_obj.nf_config.get("manifest.version", "").strip(" '\"")
if new_version.startswith("v"):
log.warning("Stripping leading 'v' from new version number")
new_version = new_version[1:]
if not current_version:
raise UserWarning("Could not find config variable 'manifest.version'")
log.info("Changing version number from '{}' to '{}'".format(current_version, new_version))
# nextflow.config - workflow manifest version
update_file_version(
"nextflow.config",
pipeline_obj,
[
(
r"version\s*=\s*[\'\"]?{}[\'\"]?".format(current_version.replace(".", r"\.")),
"version = '{}'".format(new_version),
)
],
)
def bump_nextflow_version(pipeline_obj, new_version):
"""Bumps the required Nextflow version number of a pipeline.
Args:
pipeline_obj (nf_core.utils.Pipeline): A `Pipeline` object that holds information
about the pipeline contents and build files.
new_version (str): The new version tag for the required Nextflow version.
"""
# Collect the old and new version numbers - strip leading non-numeric characters (>=)
current_version = pipeline_obj.nf_config.get("manifest.nextflowVersion", "").strip(" '\"")
current_version = re.sub(r"^[^0-9\.]*", "", current_version)
new_version = re.sub(r"^[^0-9\.]*", "", new_version)
if not current_version:
raise UserWarning("Could not find config variable 'manifest.nextflowVersion'")
log.info("Changing Nextlow version number from '{}' to '{}'".format(current_version, new_version))
# nextflow.config - manifest minimum nextflowVersion
update_file_version(
"nextflow.config",
pipeline_obj,
[
(
r"nextflowVersion\s*=\s*[\'\"]?!>={}[\'\"]?".format(current_version.replace(".", r"\.")),
"nextflowVersion = '!>={}'".format(new_version),
)
],
)
# .github/workflows/ci.yml - Nextflow version matrix
update_file_version(
os.path.join(".github", "workflows", "ci.yml"),
pipeline_obj,
[
(
# example: nxf_ver: ['20.04.0', '']
r"nxf_ver: \[[\'\"]{}[\'\"], [\'\"][\'\"]\]".format(current_version.replace(".", r"\.")),
"nxf_ver: ['{}', '']".format(new_version),
)
],
)
# README.md - Nextflow version badge
update_file_version(
"README.md",
pipeline_obj,
[
(
r"nextflow%20DSL2-%E2%89%A5{}-23aa62.svg".format(current_version.replace(".", r"\.")),
"nextflow%20DSL2-%E2%89%A5{}-23aa62.svg".format(new_version),
),
(
# Replace links to 'nf-co.re' installation page with links to Nextflow installation page
r"https://nf-co.re/usage/installation",
"https://www.nextflow.io/docs/latest/getstarted.html#installation",
),
(
# example: 1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=20.04.0`)
r"1\.\s*Install\s*\[`Nextflow`\]\(y\)\s*\(`>={}`\)".format(current_version.replace(".", r"\.")),
"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>={}`)".format(
new_version
),
),
],
)
def update_file_version(filename, pipeline_obj, patterns):
"""Updates the version number in a requested file.
Args:
filename (str): File to scan.
pipeline_obj (nf_core.lint.PipelineLint): A PipelineLint object that holds information
about the pipeline contents and build files.
        patterns (list of tuples): (regex pattern, replacement string) pairs to apply.
Raises:
ValueError, if the version number cannot be found.
"""
# Load the file
fn = pipeline_obj._fp(filename)
content = ""
try:
with open(fn, "r") as fh:
content = fh.read()
except FileNotFoundError:
log.warning("File not found: '{}'".format(fn))
return
replacements = []
for pattern in patterns:
found_match = False
newcontent = []
for line in content.splitlines():
# Match the pattern
matches_pattern = re.findall("^.*{}.*$".format(pattern[0]), line)
if matches_pattern:
found_match = True
# Replace the match
newline = re.sub(pattern[0], pattern[1], line)
newcontent.append(newline)
# Save for logging
replacements.append((line, newline))
# No match, keep line as it is
else:
newcontent.append(line)
if found_match:
content = "\n".join(newcontent)
else:
log.error("Could not find version number in {}: '{}'".format(filename, pattern))
log.info("Updated version in '{}'".format(filename))
for replacement in replacements:
stderr.print(" [red] - {}".format(replacement[0].strip()), highlight=False)
stderr.print(" [green] + {}".format(replacement[1].strip()), highlight=False)
stderr.print("\n")
with open(fn, "w") as fh:
fh.write(content)
| 35.0625
| 130
| 0.580943
|
f2a122adac59d6c0436c413e9896b78d0f36e581
| 3,121
|
py
|
Python
|
notifyore/notifiers/growl.py
|
ptone/notifyore
|
43b5aad98f2a5e49f7d2b5ed0bfbf6de0ef3400b
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2015-11-05T08:57:03.000Z
|
2016-07-17T18:11:06.000Z
|
notifyore/notifiers/growl.py
|
ptone/notifyore
|
43b5aad98f2a5e49f7d2b5ed0bfbf6de0ef3400b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
notifyore/notifiers/growl.py
|
ptone/notifyore
|
43b5aad98f2a5e49f7d2b5ed0bfbf6de0ef3400b
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import hashlib
import os
import time
import urllib2
import tempfile
import Growl
from notifyore.notifiers import BaseNotifier
from notifyore.utils import get_convore_logo
def get_growl_image(url):
cache_folder = os.path.join(tempfile.gettempdir(),'profile image cache')
if not os.path.exists(cache_folder):
os.makedirs(cache_folder)
fname = '%s.%s' % (hashlib.md5(url).hexdigest(), url.split('.')[-1])
cached_image = os.path.join(cache_folder,fname)
image = None
    if os.path.exists(cached_image):
        mtime = os.path.getmtime(cached_image)
        # invalidate if over 3 days old
        if (time.time() - mtime) > (60 * 60 * 24 * 3):
            os.remove(cached_image)
        else:
            image = Growl.Image.imageFromPath(cached_image)
    if image is None:
        # download (or re-download after invalidation) and cache the profile image
        f = open(cached_image, 'wb')
        f.write(urllib2.urlopen(url).read())
        f.close()
        image = Growl.Image.imageFromPath(cached_image)
    return image
class GrowlNotifier(BaseNotifier):
def __init__(self, *args, **kwargs):
self.notification_name = kwargs.pop('name', 'Convore Notification')
super(GrowlNotifier, self).__init__(*args, **kwargs)
self.default_image = Growl.Image.imageFromPath(get_convore_logo())
self.growl = Growl.GrowlNotifier(kwargs.get('appname', 'Notifyore'),
[self.notification_name], applicationIcon = self.default_image)
self.growl.register()
def handle_message(self, message):
# growl notification requires:
# title
# text
# img (optional)
# sticky flag
message = self.normalize_message(message)
        icon = self.default_image  # fall back to the app icon when no user image is available
        if 'user' in message:
img = get_growl_image(message['user']['img'])
icon = img
kind = message['kind']
description = message.get('n_message', '')
if description == '':
description = kind
title = None
group = message['n_group']
topic = message['n_topic']
user_line = message['n_user']
title_template = """%(group)s
%(topic)s
%(user_line)s"""
# should display message as:
# Group
# Topic
# Author
# Body of message
if kind == 'mention':
# notification_args['title'] = "%s mentioned you" % notification_args['title']
user_line = "%s mentioned you" % message['n_user']
elif kind == 'topic':
title = "%s created a new topic\nin %s" % (message['n_user'], message['n_group'])
description = message['n_topic']
elif kind in ['login','logout']:
description = kind
elif kind in ['star', 'unstar']:
user_line = "{user} {kind}red message".format(
user=user_line,
kind=kind)
if not title:
title = title_template % {
'group':group,
'topic':topic,
'user_line':user_line}
self.growl.notify(
self.notification_name,
title,
description,
icon=icon)
| 33.202128
| 93
| 0.576097
|
dd010e05dc07be04581b3fdcd00e124d1bb8a260
| 532
|
py
|
Python
|
streams/migrations/0002_auto_20200724_2202.py
|
danielpomas/church_site
|
69d33f3908e4e8b0fdbde9ebb8c14f72050f9efe
|
[
"MIT"
] | null | null | null |
streams/migrations/0002_auto_20200724_2202.py
|
danielpomas/church_site
|
69d33f3908e4e8b0fdbde9ebb8c14f72050f9efe
|
[
"MIT"
] | 44
|
2020-05-13T20:15:26.000Z
|
2022-03-04T02:58:58.000Z
|
streams/migrations/0002_auto_20200724_2202.py
|
danielpomas/church_site
|
69d33f3908e4e8b0fdbde9ebb8c14f72050f9efe
|
[
"MIT"
] | 4
|
2020-06-05T17:59:52.000Z
|
2021-02-06T19:09:43.000Z
|
# Generated by Django 3.0.8 on 2020-07-25 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('streams', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='stream',
name='audio_views',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='stream',
name='video_views',
field=models.IntegerField(default=0),
),
]
| 22.166667
| 49
| 0.567669
|
74ee55700a984a4f4f449e794eee0ff2e1d67ebe
| 6,661
|
py
|
Python
|
ddtrace/pin.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
ddtrace/pin.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
ddtrace/pin.py
|
tancnle/dd-trace-py
|
4313f388383b90ccf2bcbca9d7ef1c400c827ece
|
[
"BSD-3-Clause"
] | null | null | null |
import ddtrace
from .internal.logger import get_logger
from .vendor import wrapt
log = get_logger(__name__)
# To set attributes on wrapt proxy objects use this prefix:
# http://wrapt.readthedocs.io/en/latest/wrappers.html
_DD_PIN_NAME = '_datadog_pin'
_DD_PIN_PROXY_NAME = '_self_' + _DD_PIN_NAME
class Pin(object):
"""Pin (a.k.a Patch INfo) is a small class which is used to
set tracing metadata on a particular traced connection.
This is useful if you wanted to, say, trace two different
database clusters.
>>> conn = sqlite.connect("/tmp/user.db")
>>> # Override a pin for a specific connection
>>> pin = Pin.override(conn, service="user-db")
>>> conn = sqlite.connect("/tmp/image.db")
"""
__slots__ = ['app', 'app_type', 'tags', 'tracer', '_target', '_config', '_initialized']
def __init__(self, service, app=None, app_type=None, tags=None, tracer=None, _config=None):
tracer = tracer or ddtrace.tracer
self.app = app
self.app_type = app_type
self.tags = tags
self.tracer = tracer
self._target = None
# keep the configuration attribute internal because the
# public API to access it is not the Pin class
self._config = _config or {}
# [Backward compatibility]: service argument updates the `Pin` config
self._config['service_name'] = service
self._initialized = True
@property
def service(self):
"""Backward compatibility: accessing to `pin.service` returns the underlying
configuration value.
"""
return self._config['service_name']
def __setattr__(self, name, value):
if getattr(self, '_initialized', False) and name != '_target':
raise AttributeError("can't mutate a pin, use override() or clone() instead")
super(Pin, self).__setattr__(name, value)
def __repr__(self):
return "Pin(service=%s, app=%s, app_type=%s, tags=%s, tracer=%s)" % (
self.service, self.app, self.app_type, self.tags, self.tracer)
@staticmethod
def _find(*objs):
"""
Return the first :class:`ddtrace.pin.Pin` found on any of the provided objects or `None` if none were found
>>> pin = Pin._find(wrapper, instance, conn, app)
:param *objs: The objects to search for a :class:`ddtrace.pin.Pin` on
:type objs: List of objects
:rtype: :class:`ddtrace.pin.Pin`, None
        :returns: The first found :class:`ddtrace.pin.Pin` or `None` if none was found
"""
for obj in objs:
pin = Pin.get_from(obj)
if pin:
return pin
return None
@staticmethod
def get_from(obj):
"""Return the pin associated with the given object. If a pin is attached to
`obj` but the instance is not the owner of the pin, a new pin is cloned and
attached. This ensures that a pin inherited from a class is a copy for the new
instance, avoiding that a specific instance overrides other pins values.
>>> pin = Pin.get_from(conn)
:param obj: The object to look for a :class:`ddtrace.pin.Pin` on
:type obj: object
:rtype: :class:`ddtrace.pin.Pin`, None
:returns: :class:`ddtrace.pin.Pin` associated with the object, or None if none was found
"""
if hasattr(obj, '__getddpin__'):
return obj.__getddpin__()
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
pin = getattr(obj, pin_name, None)
# detect if the PIN has been inherited from a class
if pin is not None and pin._target != id(obj):
pin = pin.clone()
pin.onto(obj)
return pin
@classmethod
def override(cls, obj, service=None, app=None, app_type=None, tags=None, tracer=None):
"""Override an object with the given attributes.
That's the recommended way to customize an already instrumented client, without
losing existing attributes.
>>> conn = sqlite.connect("/tmp/user.db")
>>> # Override a pin for a specific connection
>>> Pin.override(conn, service="user-db")
"""
if not obj:
return
pin = cls.get_from(obj)
if not pin:
pin = Pin(service)
pin.clone(
service=service,
app=app,
app_type=app_type,
tags=tags,
tracer=tracer,
).onto(obj)
def enabled(self):
"""Return true if this pin's tracer is enabled. """
return bool(self.tracer) and self.tracer.enabled
def onto(self, obj, send=True):
"""Patch this pin onto the given object. If send is true, it will also
queue the metadata to be sent to the server.
"""
# Actually patch it on the object.
try:
if hasattr(obj, '__setddpin__'):
return obj.__setddpin__(self)
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
# set the target reference; any get_from, clones and retarget the new PIN
self._target = id(obj)
return setattr(obj, pin_name, self)
except AttributeError:
log.debug("can't pin onto object. skipping", exc_info=True)
def remove_from(self, obj):
# Remove pin from the object.
try:
pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
pin = Pin.get_from(obj)
if pin is not None:
delattr(obj, pin_name)
except AttributeError:
log.debug('can\'t remove pin from object. skipping', exc_info=True)
def clone(self, service=None, app=None, app_type=None, tags=None, tracer=None):
"""Return a clone of the pin with the given attributes replaced."""
# do a shallow copy of Pin dicts
if not tags and self.tags:
tags = self.tags.copy()
# we use a copy instead of a deepcopy because we expect configurations
# to have only a root level dictionary without nested objects. Using
# deepcopy introduces a big overhead:
#
# copy: 0.00654911994934082
# deepcopy: 0.2787208557128906
config = self._config.copy()
return Pin(
service=service or self.service,
app=app or self.app,
app_type=app_type or self.app_type,
tags=tags,
tracer=tracer or self.tracer, # do not clone the Tracer
_config=config,
)
| 36.398907
| 115
| 0.61192
|
3cef4b6d65dda7c1b7cff5ed70ee66301d3182ce
| 409
|
py
|
Python
|
Desafios/Ex-054.py
|
LuckyCards/Curso-Python3
|
b39c7b2645220c71c35012f16c102428053fee25
|
[
"MIT"
] | 1
|
2021-04-06T16:14:43.000Z
|
2021-04-06T16:14:43.000Z
|
Desafios/Ex-054.py
|
LuckyCards/Curso-Python3
|
b39c7b2645220c71c35012f16c102428053fee25
|
[
"MIT"
] | null | null | null |
Desafios/Ex-054.py
|
LuckyCards/Curso-Python3
|
b39c7b2645220c71c35012f16c102428053fee25
|
[
"MIT"
] | null | null | null |
from datetime import date
print(f'\033[33m{"—"*30:^30}\033[m')
print(f'\033[36m{"EXERCÍCIO Nº 54":^30}\033[m')
print(f'\033[33m{"—"*30:^30}\033[m')
maior = 0
menor = 0
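# read eight birth years and count how many people are over 21 and how many are not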
for x in range(0, 8):
ano = int(input('Digite seu ano de nascimento: '))
if date.today().year - ano > 21:
maior += 1
else:
menor += 1
print(f'\nPessoas maiores de idade: {maior}\nPessoa menores de idade: {menor}')
| avg_line_length: 31.461538 | max_line_length: 79 | alphanum_fraction: 0.606357
| hexsha: 2d0c79aa8fe2074f411275650271f6bc0c2de0cc | size: 765 | ext: py | lang: Python
| max_stars_repo_path: exercices/questao06.py | max_stars_repo_name: LBarros77/Python | max_stars_repo_head_hexsha: 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
| max_issues_repo_path: exercices/questao06.py | max_issues_repo_name: LBarros77/Python | max_issues_repo_head_hexsha: 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
| max_forks_repo_path: exercices/questao06.py | max_forks_repo_name: LBarros77/Python | max_forks_repo_head_hexsha: 283b383d9d14c8d7b907b80f03f7cdc5dbd1e8af | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
def media(x, y, z = 2):
    return (float(x) + float(y)) / z
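# With the default z=2 this is the arithmetic mean of its two arguments,
# e.g. media(20, 30) == (20.0 + 30.0) / 2 == 25.0.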
calender = {
1: "Janeiro",
2: "Fevereiro",
3: "Março",
4: "Abril",
5: "Maio",
6: "Junho",
7: "Julho",
8: "Agosto",
9: "Setembro",
10: "Outubro",
11: "Novembro",
12: "Dezembro",
}
# temperatura max e min
print("Digite o valor das temperaturas: de cada mê.")
month_half = [media(input(f"{i}º mês\nMínima: "), input("Máxima: ")) for i in range(1, 13)]
year_half = media(min(month_half), max(month_half))
print("=" * 50)
print("Média anual: ", year_half,"\n","=" * 50)
print("Valores a cima da média anual: ")
for n, i in enumerate(month_half, start=1):
if i > year_half:
print(f"A temperatura media de {calender[n]} é {i:.2f}")
print("=" * 50)
| avg_line_length: 24.677419 | max_line_length: 91 | alphanum_fraction: 0.573856
| hexsha: 313b55253d4dfbb73b3125f12ebd15ecae436289 | size: 7,702 | ext: py | lang: Python
| max_stars_repo_path: DeepSpeech/bin/import_swb.py | max_stars_repo_name: Kutim/Run-Black-Box-Audio | max_stars_repo_head_hexsha: 6564f255f574ef63eeb24688773f03517c124259 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
| max_issues_repo_path: DeepSpeech/bin/import_swb.py | max_issues_repo_name: Kutim/Run-Black-Box-Audio | max_issues_repo_head_hexsha: 6564f255f574ef63eeb24688773f03517c124259 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2019-07-21T13:22:28.000Z | max_issues_repo_issues_event_max_datetime: 2019-07-21T13:22:28.000Z
| max_forks_repo_path: DeepSpeech/bin/import_swb.py | max_forks_repo_name: Kutim/Run-Black-Box-Audio | max_forks_repo_head_hexsha: 6564f255f574ef63eeb24688773f03517c124259 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
# Make sure we can import stuff from util/
# This script needs to be run from the root of the DeepSpeech repository
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import fnmatch
import pandas
import subprocess
import unicodedata
import wave
import codecs
from util.text import validate_label
def _download_and_preprocess_data(data_dir):
data_dir = os.path.join(data_dir, "LDC97S62")
# Conditionally convert swb sph data to wav
_maybe_convert_wav(data_dir, "swb1_d1", "swb1_d1-wav")
_maybe_convert_wav(data_dir, "swb1_d2", "swb1_d2-wav")
_maybe_convert_wav(data_dir, "swb1_d3", "swb1_d3-wav")
_maybe_convert_wav(data_dir, "swb1_d4", "swb1_d4-wav")
# Conditionally split wav data
d1 = _maybe_split_wav_and_sentences(data_dir, "swb_ms98_transcriptions", "swb1_d1-wav", "swb1_d1-split-wav")
d2 = _maybe_split_wav_and_sentences(data_dir, "swb_ms98_transcriptions", "swb1_d2-wav", "swb1_d2-split-wav")
d3 = _maybe_split_wav_and_sentences(data_dir, "swb_ms98_transcriptions", "swb1_d3-wav", "swb1_d3-split-wav")
d4 = _maybe_split_wav_and_sentences(data_dir, "swb_ms98_transcriptions", "swb1_d4-wav", "swb1_d4-split-wav")
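    # Each _maybe_split_wav_and_sentences() call returns a per-disc DataFrame;
    # DataFrame.append combines them into one frame (append is deprecated in
    # recent pandas in favor of pandas.concat, but matches the pandas versions
    # this script was written for).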
swb_files = d1.append(d2).append(d3).append(d4)
train_files, dev_files, test_files = _split_sets(swb_files)
# Write sets to disk as CSV files
train_files.to_csv(os.path.join(data_dir, "swb-train.csv"), index=False)
dev_files.to_csv(os.path.join(data_dir, "swb-dev.csv"), index=False)
test_files.to_csv(os.path.join(data_dir, "swb-test.csv"), index=False)
def _maybe_convert_wav(data_dir, original_data, converted_data):
source_dir = os.path.join(data_dir, original_data)
target_dir = os.path.join(data_dir, converted_data)
# Conditionally convert sph files to wav files
if os.path.exists(target_dir):
print("skipping maybe_convert_wav")
return
# Create target_dir
os.makedirs(target_dir)
# Loop over sph files in source_dir and convert each to 16-bit PCM wav
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, "*.sph"):
for channel in ['1', '2']:
sph_file = os.path.join(root, filename)
wav_filename = os.path.splitext(os.path.basename(sph_file))[0] + "-" + channel + ".wav"
wav_file = os.path.join(target_dir, wav_filename)
print("converting {} to {}".format(sph_file, wav_file))
subprocess.check_call(["sph2pipe", "-c", channel, "-p", "-f", "rif", sph_file, wav_file])
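# The call above is roughly equivalent to running (hypothetical file names):
#   sph2pipe -c 1 -p -f rif sw02001.sph sw02001-1.wav
# i.e. extract one channel of the SPHERE file and write it out as RIFF/WAV PCM.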
def _parse_transcriptions(trans_file):
segments = []
with codecs.open(trans_file, "r", "utf-8") as fin:
for line in fin:
if line.startswith("#") or len(line) <= 1:
continue
tokens = line.split()
start_time = float(tokens[1])
stop_time = float(tokens[2])
transcript = validate_label(" ".join(tokens[3:]))
            if transcript is None:
continue
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
segments.append({
"start_time": start_time,
"stop_time": stop_time,
"transcript": transcript,
})
return segments
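# A hypothetical ms98 transcript line such as
#   sw2001A-ms98-a-0001 1.23 4.56 hello there
# yields {"start_time": 1.23, "stop_time": 4.56, "transcript": "hello there"}
# (after validate_label() and the ASCII normalization above).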
def _maybe_split_wav_and_sentences(data_dir, trans_data, original_data, converted_data):
trans_dir = os.path.join(data_dir, trans_data)
source_dir = os.path.join(data_dir, original_data)
target_dir = os.path.join(data_dir, converted_data)
if os.path.exists(target_dir):
print("skipping maybe_split_wav")
return
os.makedirs(target_dir)
files = []
# Loop over transcription files and split corresponding wav
for root, dirnames, filenames in os.walk(trans_dir):
for filename in fnmatch.filter(filenames, "*.text"):
if "trans" not in filename:
continue
trans_file = os.path.join(root, filename)
segments = _parse_transcriptions(trans_file)
# Open wav corresponding to transcription file
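            # The transcript basename looks like e.g. "sw2001A-ms98-a-trans": character 6
            # is the conversation side ('A' or 'B'), mapped to wav channel "1" or "2", and
            # characters 2:6 are the conversation id used to build the wav filename below.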
channel = ("2","1")[(os.path.splitext(os.path.basename(trans_file))[0])[6] == 'A']
wav_filename = "sw0" + (os.path.splitext(os.path.basename(trans_file))[0])[2:6] + "-" + channel + ".wav"
wav_file = os.path.join(source_dir, wav_filename)
print("splitting {} according to {}".format(wav_file, trans_file))
if not os.path.exists(wav_file):
print("skipping. does not exist:" + wav_file)
continue
origAudio = wave.open(wav_file, "r")
# Loop over segments and split wav_file for each segment
for segment in segments:
# Create wav segment filename
start_time = segment["start_time"]
stop_time = segment["stop_time"]
new_wav_filename = os.path.splitext(os.path.basename(trans_file))[0] + "-" + str(
start_time) + "-" + str(stop_time) + ".wav"
if _is_wav_too_short(new_wav_filename):
continue
new_wav_file = os.path.join(target_dir, new_wav_filename)
_split_wav(origAudio, start_time, stop_time, new_wav_file)
new_wav_filesize = os.path.getsize(new_wav_file)
transcript = segment["transcript"]
files.append((os.path.abspath(new_wav_file), new_wav_filesize, transcript))
# Close origAudio
origAudio.close()
return pandas.DataFrame(data=files, columns=["wav_filename", "wav_filesize", "transcript"])
def _is_wav_too_short(wav_filename):
short_wav_filenames = ['sw2986A-ms98-a-trans-80.6385-83.358875.wav', 'sw2663A-ms98-a-trans-161.12025-164.213375.wav']
return wav_filename in short_wav_filenames
def _split_wav(origAudio, start_time, stop_time, new_wav_file):
frameRate = origAudio.getframerate()
origAudio.setpos(int(start_time * frameRate))
chunkData = origAudio.readframes(int((stop_time - start_time) * frameRate))
chunkAudio = wave.open(new_wav_file, "w")
chunkAudio.setnchannels(origAudio.getnchannels())
chunkAudio.setsampwidth(origAudio.getsampwidth())
chunkAudio.setframerate(frameRate)
chunkAudio.writeframes(chunkData)
chunkAudio.close()
def _split_sets(filelist):
# We initially split the entire set into 80% train and 20% test, then
# split the train set into 80% train and 20% validation.
train_beg = 0
train_end = int(0.8 * len(filelist))
dev_beg = int(0.8 * train_end)
dev_end = train_end
train_end = dev_beg
test_beg = dev_end
test_end = len(filelist)
return (filelist[train_beg:train_end], filelist[dev_beg:dev_end], filelist[test_beg:test_end])
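# Worked example: for 100 files, train_end is first 80 and dev_beg is 64, so the
# function returns filelist[0:64] (train), filelist[64:80] (dev) and
# filelist[80:100] (test), i.e. a 64/16/20 split.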
def _read_data_set(filelist, thread_count, batch_size, numcep, numcontext, stride=1, offset=0, next_index=lambda i: i + 1, limit=0):
# Optionally apply dataset size limit
if limit > 0:
filelist = filelist.iloc[:limit]
filelist = filelist[offset::stride]
# Return DataSet
    return DataSet(filelist, thread_count, batch_size, numcep, numcontext, next_index=next_index)
if __name__ == "__main__":
_download_and_preprocess_data(sys.argv[1])
| avg_line_length: 40.114583 | max_line_length: 132 | alphanum_fraction: 0.655025 |