Dataset schema (one row per source file):

| column | dtype | stats |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | values 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | lengths 4 to 209 |
| max_stars_repo_name | string | lengths 5 to 121 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | values 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 4 to 209 |
| max_issues_repo_name | string | lengths 5 to 121 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | values 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 4 to 209 |
| max_forks_repo_name | string | lengths 5 to 121 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | values 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 4 to 1.02M |
| avg_line_length | float64 | values 1.07 to 66.1k |
| max_line_length | int64 | values 4 to 266k |
| alphanum_fraction | float64 | values 0.01 to 1 |
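Each row below pairs this metadata with the complete source file in `content`. As a quick illustration of working with the schema (the parquet path here is a placeholder, not something the dump specifies), the numeric columns lend themselves to simple filtering with pandas:

```python
import pandas as pd

# Placeholder path: point this at wherever the dump is stored locally.
df = pd.read_parquet("code_dump.parquet")

# Keep small, mostly-text Python files, using the columns described above.
mask = (
    (df["ext"] == "py")
    & (df["size"] < 50_000)
    & (df["alphanum_fraction"].between(0.25, 0.95))
    & (df["max_line_length"] < 1_000)
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```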
hexsha: 5d6fef0f0f8deee9202937a684effa0ea3325f10 | size: 4,293 | ext: py | lang: Python
max_stars: pirates/leveleditor/worldData/tortuga_building_int_8.py | itsyaboyrocket/pirates @ 6ca1e7d571c670b0d976f65e608235707b5737e3 | ["BSD-3-Clause"] | count: 3 | min: 2021-02-25T06:38:13.000Z | max: 2022-03-22T07:00:15.000Z
max_issues: pirates/leveleditor/worldData/tortuga_building_int_8.py | itsyaboyrocket/pirates @ 6ca1e7d571c670b0d976f65e608235707b5737e3 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_forks: pirates/leveleditor/worldData/tortuga_building_int_8.py | itsyaboyrocket/pirates @ 6ca1e7d571c670b0d976f65e608235707b5737e3 | ["BSD-3-Clause"] | count: 1 | min: 2021-02-25T06:38:17.000Z | max: 2021-02-25T06:38:17.000Z
content:
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.tortuga_building_int_8
from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {'AmbientColors': {}, 'DirectionalColors': {}, 'FogColors': {}, 'FogRanges': {}, 'Objects': {'1156268617.43dzlu0': {'Type': 'Building Interior', 'Name': '', 'AdditionalData': ['interior_spanish_store_tailor'], 'Instanced': True, 'Objects': {'1196904064.0dxschafe': {'Type': 'Townsperson', 'Category': 'Tailor', 'AnimSet': 'default', 'CustomModel': 'None', 'Hpr': VBase3(-6.317, 0.0, 0.0), 'Patrol Radius': '12.0000', 'Pos': Point3(-7.608, -3.717, 0.0), 'PoseAnim': '', 'PoseFrame': '', 'Private Status': 'All', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'TORTUGA_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Villager', 'TrailFX': 'None'}, '1201041578.2dxschafe': {'Type': 'Door Locator Node', 'Name': 'door_locator', 'Hpr': VBase3(178.916, 0.0, 0.0), 'Pos': Point3(-13.404, 47.298, 5.038), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1201122430.39dxschafe': {'Type': 'Light - Dynamic', 'Attenuation': '0.005', 'ConeAngle': '26.3855', 'DropOff': '0.0000', 'FlickRate': '0.5000', 'Hpr': VBase3(-132.8, -15.313, 2.237), 'Intensity': '1.0723', 'LightType': 'SPOT', 'Pos': Point3(-21.577, 16.0, 10.796), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (1, 1, 1, 1), 'Model': 'models/props/light_tool_bulb'}}, '1212432000.0WDIG1': {'Type': 'Townsperson', 'Category': 'Commoner', 'AnimSet': 'default', 'CustomModel': 'None', 'DNA': '1212432000.0WDIG1', 'HelpID': 'NONE', 'Hpr': VBase3(127.926, 0.0, 0.0), 'Patrol Radius': '12.0000', 'Pos': Point3(13.513, 41.935, 5.265), 'PoseAnim': '', 'PoseFrame': '', 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'ShopID': 'PORT_ROYAL_DEFAULTS', 'Start State': 'Idle', 'StartFrame': '0', 'Team': 'Player', 'TrailFX': 'None'}}, 'Visual': {'Model': 'models/buildings/interior_spanish_store'}}}, 'Node Links': [], 'Layers': {'Collisions': ['1184008208.59kmuller', '1184016064.62kmuller', '1184013852.84kmuller', '1185822696.06kmuller', '1184006140.32kmuller', '1184002350.98kmuller', '1184007573.29kmuller', '1184021176.59kmuller', '1184005963.59kmuller', '1188324241.31akelts', '1184006537.34kmuller', '1184006605.81kmuller', '1187139568.33kmuller', '1188324186.98akelts', '1184006730.66kmuller', '1184007538.51kmuller', '1184006188.41kmuller', '1184021084.27kmuller', '1185824396.94kmuller', '1185824250.16kmuller', '1185823630.52kmuller', '1185823760.23kmuller', '1185824497.83kmuller', '1185824751.45kmuller', '1187739103.34akelts', '1188323993.34akelts', '1184016538.29kmuller', '1185822200.97kmuller', '1184016225.99kmuller', '1195241421.34akelts', '1195242796.08akelts', '1184020642.13kmuller', '1195237994.63akelts', '1184020756.88kmuller', '1184020833.4kmuller', '1185820992.97kmuller', '1185821053.83kmuller', '1184015068.54kmuller', '1184014935.82kmuller', '1185821432.88kmuller', '1185821701.86kmuller', '1195240137.55akelts', '1195241539.38akelts', '1195238422.3akelts', '1195238473.22akelts', '1185821453.17kmuller', '1184021269.96kmuller', '1185821310.89kmuller', '1185821165.59kmuller', '1185821199.36kmuller', '1185822035.98kmuller', '1184015806.59kmuller', '1185822059.48kmuller', '1185920461.76kmuller', '1194984449.66akelts', '1185824206.22kmuller', '1184003446.23kmuller', '1184003254.85kmuller', '1184003218.74kmuller', '1184002700.44kmuller', '1186705073.11kmuller', '1187658531.86akelts', '1186705214.3kmuller', '1185824927.28kmuller', '1184014204.54kmuller', '1184014152.84kmuller']}, 'ObjectIds': {'1156268617.43dzlu0': '["Objects"]["1156268617.43dzlu0"]', '1196904064.0dxschafe': '["Objects"]["1156268617.43dzlu0"]["Objects"]["1196904064.0dxschafe"]', 
'1201041578.2dxschafe': '["Objects"]["1156268617.43dzlu0"]["Objects"]["1201041578.2dxschafe"]', '1201122430.39dxschafe': '["Objects"]["1156268617.43dzlu0"]["Objects"]["1201122430.39dxschafe"]', '1212432000.0WDIG1': '["Objects"]["1156268617.43dzlu0"]["Objects"]["1212432000.0WDIG1"]'}}
extraInfo = {'camPos': Point3(-3.51505, 25.1753, 18.0312), 'camHpr': VBase3(-45.603, -20.6447, -2.73708e-06), 'focalLength': 1.39999997616, 'skyState': -1, 'fog': 0}
avg_line_length: 613.285714 | max_line_length: 3,837 | alphanum_fraction: 0.702772
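The decompiled module above is pure level data: everything hangs off the nested `objectStruct` dict, and `ObjectIds` maps each object id to a bracketed path string into `Objects`. A small sketch of resolving such a path without `eval` (the helper is hypothetical, not part of the original file):

```python
import re


def resolve_object_path(struct, path):
    """Follow a path string such as '["Objects"]["1156268617.43dzlu0"]'
    through a nested dict of worldData objects."""
    keys = re.findall(r'\["([^"]+)"\]', path)
    node = struct
    for key in keys:
        node = node[key]
    return node


# Example, assuming objectStruct as defined in the file above:
# tailor = resolve_object_path(objectStruct, objectStruct['ObjectIds']['1196904064.0dxschafe'])
# print(tailor['Type'], tailor['Pos'])
```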
hexsha: 2a6139b95cd41bfe5c20d3d46f59e85c4b25d953 | size: 4,122 | ext: py | lang: Python
max_stars: tempest/api/compute/keypairs/test_keypairs_negative.py | gamado/ds_tempest_rm_me_please @ 3f5d149b3a32e713c60c59a054035ac2e5c73c28 | ["Apache-2.0"] | count: 3 | min: 2016-07-15T12:27:23.000Z | max: 2021-04-23T04:41:10.000Z
max_issues: tempest/api/compute/keypairs/test_keypairs_negative.py | LIS/lis-tempest @ 8e6403b2d6de81c5d18ed867b4977385c8278b75 | ["Apache-2.0"] | count: null | min: null | max: null
max_forks: tempest/api/compute/keypairs/test_keypairs_negative.py | LIS/lis-tempest @ 8e6403b2d6de81c5d18ed867b4977385c8278b75 | ["Apache-2.0"] | count: 12 | min: 2016-07-14T18:13:05.000Z | max: 2017-07-08T18:45:42.000Z
content:
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute.keypairs import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test
class KeyPairsNegativeTestJSON(base.BaseKeypairTest):
@test.attr(type=['negative'])
@test.idempotent_id('29cca892-46ae-4d48-bc32-8fe7e731eb81')
def test_keypair_create_with_invalid_pub_key(self):
# Keypair should not be created with a non RSA public key
k_name = data_utils.rand_name('keypair')
pub_key = "ssh-rsa JUNK nova@ubuntu"
self.assertRaises(lib_exc.BadRequest,
self._create_keypair, k_name, pub_key)
@test.attr(type=['negative'])
@test.idempotent_id('7cc32e47-4c42-489d-9623-c5e2cb5a2fa5')
def test_keypair_delete_nonexistent_key(self):
# Non-existent key deletion should throw a proper error
k_name = data_utils.rand_name("keypair-non-existent")
self.assertRaises(lib_exc.NotFound, self.client.delete_keypair,
k_name)
@test.attr(type=['negative'])
@test.idempotent_id('dade320e-69ca-42a9-ba4a-345300f127e0')
def test_create_keypair_with_empty_public_key(self):
# Keypair should not be created with an empty public key
k_name = data_utils.rand_name("keypair")
pub_key = ' '
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative'])
@test.idempotent_id('fc100c19-2926-4b9c-8fdc-d0589ee2f9ff')
def test_create_keypair_when_public_key_bits_exceeds_maximum(self):
# Keypair should not be created when public key bits are too long
k_name = data_utils.rand_name("keypair")
pub_key = 'ssh-rsa ' + 'A' * 2048 + ' openstack@ubuntu'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name, pub_key)
@test.attr(type=['negative'])
@test.idempotent_id('0359a7f1-f002-4682-8073-0c91e4011b7c')
def test_create_keypair_with_duplicate_name(self):
# Keypairs with duplicate names should not be created
k_name = data_utils.rand_name('keypair')
self.client.create_keypair(name=k_name)
# Now try the same keyname to create another key
self.assertRaises(lib_exc.Conflict, self._create_keypair,
k_name)
self.client.delete_keypair(k_name)
@test.attr(type=['negative'])
@test.idempotent_id('1398abe1-4a84-45fb-9294-89f514daff00')
def test_create_keypair_with_empty_name_string(self):
# Keypairs with name being an empty string should not be created
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
'')
@test.attr(type=['negative'])
@test.idempotent_id('3faa916f-779f-4103-aca7-dc3538eee1b7')
def test_create_keypair_with_long_keynames(self):
# Keypairs with name longer than 255 chars should not be created
k_name = 'keypair-'.ljust(260, '0')
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
@test.attr(type=['negative'])
@test.idempotent_id('45fbe5e0-acb5-49aa-837a-ff8d0719db91')
def test_create_keypair_invalid_name(self):
# Keypairs with name being an invalid name should not be created
k_name = 'key_/.\@:'
self.assertRaises(lib_exc.BadRequest, self._create_keypair,
k_name)
avg_line_length: 44.804348 | max_line_length: 78 | alphanum_fraction: 0.688986
hexsha: bd24cac29ec51c23215abd2503772dca0a2fbd2f | size: 1,829 | ext: py | lang: Python
max_stars: des.py | includeavaneesh/Hybridized-AES-DES-EncryptionDecryption @ 6cf8f100053028ff878b99ce3287af9f0cc1aed1 | ["MIT"] | count: null | min: null | max: null
max_issues: des.py | includeavaneesh/Hybridized-AES-DES-EncryptionDecryption @ 6cf8f100053028ff878b99ce3287af9f0cc1aed1 | ["MIT"] | count: null | min: null | max: null
max_forks: des.py | includeavaneesh/Hybridized-AES-DES-EncryptionDecryption @ 6cf8f100053028ff878b99ce3287af9f0cc1aed1 | ["MIT"] | count: null | min: null | max: null
content:
import time
import string
import random
from Crypto import Random
from Crypto.Cipher import DES
from Crypto.Random import get_random_bytes
from base64 import b64encode, b64decode
start_time = time.time()
class DESCipher(object):
def __init__(self, key):
self.block_size = DES.block_size
self.key = key
def encrypt(self, plain_text):
plain_text = self.__pad(plain_text)
iv = Random.new().read(self.block_size)
print(str(iv))
cipher = DES.new(self.key, DES.MODE_CBC, iv)
encrypted_text = cipher.encrypt(plain_text.encode())
return b64encode(iv + encrypted_text).decode("utf-8")
def decrypt(self, encrypted_text):
encrypted_text = b64decode(encrypted_text)
iv = encrypted_text[:self.block_size]
cipher = DES.new(self.key, DES.MODE_CBC, iv)
plain_text = cipher.decrypt(encrypted_text[self.block_size:]).decode("utf-8")
return self.__unpad(plain_text)
def __pad(self, plain_text):
number_of_bytes_to_pad = self.block_size - len(plain_text) % self.block_size
ascii_string = chr(number_of_bytes_to_pad)
padding_str = number_of_bytes_to_pad * ascii_string
padded_plain_text = plain_text + padding_str
return padded_plain_text
@staticmethod
def __unpad(plain_text):
last_character = plain_text[len(plain_text) - 1:]
return plain_text[:-ord(last_character)]
if __name__ == "__main__":
secret_key = get_random_bytes(8)
print(secret_key)
enc = DESCipher(secret_key)
# res = ''.join(random.choices(string.ascii_uppercase + string.digits, k = 80000000))
res= '245'
encrypted_text = enc.encrypt(res)
print(encrypted_text)
print(enc.decrypt(encrypted_text))
print("--- %s seconds ---" % (time.time() - start_time))
avg_line_length: 35.173077 | max_line_length: 89 | alphanum_fraction: 0.686714
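`DESCipher` above implements DES in CBC mode with a random IV prepended to the ciphertext and hand-rolled PKCS#7-style padding in `__pad`/`__unpad`. For comparison only (this is not from the original repository, and single DES is long obsolete for protecting real data), PyCryptodome's padding utilities express the same round trip more compactly:

```python
from base64 import b64encode, b64decode

from Crypto.Cipher import DES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad  # PKCS#7, same effect as __pad/__unpad above


def des_cbc_encrypt(key: bytes, plain_text: str) -> str:
    iv = get_random_bytes(DES.block_size)                        # 8-byte IV for DES-CBC
    cipher = DES.new(key, DES.MODE_CBC, iv)
    ct = cipher.encrypt(pad(plain_text.encode(), DES.block_size))
    return b64encode(iv + ct).decode("utf-8")                    # IV prepended, as in DESCipher


def des_cbc_decrypt(key: bytes, token: str) -> str:
    raw = b64decode(token)
    iv, ct = raw[:DES.block_size], raw[DES.block_size:]
    cipher = DES.new(key, DES.MODE_CBC, iv)
    return unpad(cipher.decrypt(ct), DES.block_size).decode("utf-8")


if __name__ == "__main__":
    key = get_random_bytes(8)
    token = des_cbc_encrypt(key, "245")
    assert des_cbc_decrypt(key, token) == "245"
```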
hexsha: 825083703a387fa044cb828f0247a29729a32d63 | size: 12,919 | ext: py | lang: Python
max_stars: install_theme.py | ShaneSmiskol/eon-custom-themes @ f9205af44cbbc714a4cdd6b259447147502aa420 | ["Zlib"] | count: 1 | min: 2020-08-13T08:04:52.000Z | max: 2020-08-13T08:04:52.000Z
max_issues: install_theme.py | ShaneSmiskol/eon-custom-themes @ f9205af44cbbc714a4cdd6b259447147502aa420 | ["Zlib"] | count: null | min: null | max: null
max_forks: install_theme.py | ShaneSmiskol/eon-custom-themes @ f9205af44cbbc714a4cdd6b259447147502aa420 | ["Zlib"] | count: null | min: null | max: null
content:
#!/usr/bin/python
##################################################################################
# Permission is granted to anyone to use this software for any purpose, #
# excluding commercial applications, and to alter it and redistribute it #
# freely, subject to the following restrictions: #
# #
# 1. The origin of this software must not be misrepresented; you must not #
# claim that you wrote the original software. If you use this software #
# in a product, an acknowledgment in the product documentation is required. #
# #
# 2. Altered source versions must be plainly marked as such, and must not be #
# misrepresented as being the original software. #
# #
# 3. This notice may not be removed or altered from any source #
# distribution. #
# #
# #
# ===Created by Colton (Brandon) S. (@C-ton) for the OpenPilot Community=== #
# === http://endoflinetech.com/eon-themes === #
# #
# With a mission to rid all EONS of Comma.ai branding #
# And give the people the freedom, knowlage, and power #
# to make their EONS purdy! #
# #
# A very special thank you to @ShaneSmiskol for creating the theme picker #
# for his tireless help, and donating the life of his LeEco EON #
# to get the LeEco based EONs supported by this project #
# Although revived least we forget..... #
##################################################################################
# #
# To Get Started Making Your EON Purdy: #
# #
# SSH into your EON: #
# (https://medium.com/@jfrux/comma-eon-getting-connected-with-ssh-3ed6136e4a75) #
# #
# Type the following commands: #
# cd /data #
# git clone https://github.com/Coltonton/eon-custom-themes.git #
# cd /data/eon-custom-themes #
# python install_theme.py #
# #
# Now follow the prompts and make your selections! #
# Everything will be done automagically!!!!! #
# #
# Don't forget to tell your friends!! #
# Love Cole (@C-ton) #
# #
# Did you know that if you have a custom OP fork you can use this #
# program to auto install your custom boot logo & boot annimation for #
# your users? See ./developer/DEVREADME #
# #
##################################################################################
import os
import time
from os import path
from datetime import datetime
from support.support_functions import print_welcome_text, check_auto_installability, get_user_theme, is_affirmative, go_back
from support.support_variables import CONTRIB_THEMES, IS_AUTO_INSTALL, AUTO_INSTALL_CONF
os.chdir(os.path.dirname(os.path.realpath(__file__))) # __file__ is safer since it doesn't change based on where this file is called from
print_welcome_text()
# Crude device detection, *shrug* it works! LeEco does not have tristate!
if path.exists('/sys/devices/virtual/switch/tri-state-key'):
print('\n*** OnePlus EON Device Detected ***')
BOOT_LOGO_THEME_PATH = 'OP3T-Logo/LOGO' # Set the boot logo theme path for 3T
BOOT_LOGO_PATH = '/dev/block/sde17' # Set the boot logo directory for 3T
else:
print('\n*** LeEco EON Device Detected ***\n')
BOOT_LOGO_THEME_PATH = 'LeEco-Logo/SPLASH' # Set the boot logo theme path for Leo
BOOT_LOGO_PATH = '/dev/block/bootdevice/by-name/splash' # Set the boot logo directory for Leo
print('IMPORTANT: Soft-bricking is likely if this detection is incorrect. Is this correct?')
if not is_affirmative():
exit()
class ThemeInstaller:
def __init__(self):
self.backup_dir = datetime.now().strftime('backups/backup.%m-%d-%y--%I.%M.%S-%p') # Get current datetime and store
os.mkdir(self.backup_dir) # Create the session backup folder
if IS_AUTO_INSTALL:
assert check_auto_installability(), 'Auto install has already been performed!'
self.auto_installer()
else:
self.start_loop()
def start_loop(self):
while 1:
self.selected_theme = get_user_theme()
if self.selected_theme is None:
print('Didn\'t select a theme, exiting.')
return
self.get_available_options()
if self.install_function() == 'exit':
return
def get_available_options(self): # Check what assets are available for the selected theme
# Check if the selected theme has a boot logo asset
self.theme_options = []
if os.path.exists('{}/{}/{}'.format(CONTRIB_THEMES, self.selected_theme, BOOT_LOGO_THEME_PATH)):
self.theme_options.append('Boot Logo')
# Check if the selected theme has a boot annimation asset
if os.path.exists('{}/{}/bootanimation.zip'.format(CONTRIB_THEMES, self.selected_theme)):
self.theme_options.append('Boot Animation')
# Check if the selected theme has a OpenPilot Spinner asset
if os.path.exists('{}/{}/spinner'.format(CONTRIB_THEMES, self.selected_theme)):
self.theme_options.append('OP Spinner')
# if os.path.exists('{}/{}/additional'.format(CONTRIB_THEMES, self.selected_theme)): # todo disabled for now
# self.theme_options.append('4. Additional Resources')
def install_function(self): # Self installer program, prompts user on what they want to do
while 1:
options = list(self.theme_options) # this only contains available options from self.get_available_options
if not len(options):
print('The selected theme has no resources available for your device! Try another.')
time.sleep(2)
return
options += ['-Main Menu-', '-Reboot-']
print('What resources do you want to install for the {} theme?'.format(self.selected_theme))
for idx, theme in enumerate(options):
print('{}. {}'.format(idx + 1, theme))
indexChoice = int(input("Enter Index Value: "))
indexChoice -= 1
            selected_option = options[indexChoice]  # index into the full printed menu (which also holds -Main Menu- and -Reboot-)
if selected_option == 'Boot Logo':
print('Selected to install the {} Boot Logo. Continue?'.format(self.selected_theme))
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
os.system('cp {} {}'.format(BOOT_LOGO_PATH, self.backup_dir)) # Make Backup
os.system('dd if={}/{}/{} of={}'.format(CONTRIB_THEMES, self.selected_theme, BOOT_LOGO_THEME_PATH, BOOT_LOGO_PATH)) # Replace
print('\nBoot Logo installed successfully! Original backed up to {}'.format(self.backup_dir))
print('Press enter to continue!')
input()
elif selected_option == 'Boot Animation':
print('Selected to install the {} Boot Animation. Continue?'.format(self.selected_theme))
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
os.system('mount -o remount,rw /system') # /system read only, must mount as r/w
os.system('mv /system/media/bootanimation.zip {}'.format(self.backup_dir)) # backup
os.system('cp {}/{}/bootanimation.zip /system/media'.format(CONTRIB_THEMES, self.selected_theme)) # replace
os.system('chmod 666 /system/media/bootanimation.zip')
print('\nBoot Animation installed successfully! Original backed up to {}'.format(self.backup_dir))
print('Press enter to continue!')
input()
elif selected_option == 'OP Spinner':
print('Selected to install the {} OP Spinner. Continue?'.format(self.selected_theme))
if not is_affirmative():
print('Not installing...')
time.sleep(1.5)
continue
print('Do you have an OP fork with a custom directory name? (ex. arnepilot, dragonpilot)') # Ask the user if their OP fork used a diffrent directory.
if is_affirmative(): # Yes there is a custom OP dir
print('What is the OP directory name? (case matters, not including /data/)')
op_dir = '/data/{}'.format(input('> ').strip('/')) # get custom dir name, strip slashes for safety
print('Your openpilot directory is {}'.format(op_dir))
input('*** Please enter to continue, or Ctrl+C to abort if this is incorrect! ***')
os.system('mv {}/selfdrive/ui/spinner/spinner {}'.format(op_dir, self.backup_dir))
os.system('cp {}/{}/spinner {}/selfdrive/ui/spinner'.format(CONTRIB_THEMES, self.selected_theme, op_dir))
print('\n{} spinner installed successfully! Original backed up to {}'.format(op_dir.split('/')[2], self.backup_dir))
else: # there is not custom OP dir
os.system('mv /data/openpilot/selfdrive/ui/spinner/spinner {}'.format(self.backup_dir))
os.system('cp {}/{}/spinner /data/openpilot/selfdrive/ui/spinner'.format(CONTRIB_THEMES, self.selected_theme))
print('\nopenpilot spinner installed successfully! Original backed up to {}'.format(self.backup_dir))
print('Press enter to continue!')
input()
elif selected_option == 'Additional Resources': # additional features
print('Additional Resources are not an active feature')
time.sleep(5)
elif selected_option == '-Main Menu-' or selected_option is None:
return
elif selected_option == '-Reboot-':
print('Rebooting.... Enjoy your new theme!!!')
os.system('am start -a android.intent.action.REBOOT') # reboot intent is safer (reboot sometimes causes corruption)
return 'exit'
def auto_installer(self): # Auto Installer program for incorperating into OP forks SEE DEVREADME
if AUTO_INSTALL_CONF['install_logo']: # Auto BootLogo Install Code
os.system('cp {} {}'.format(BOOT_LOGO_PATH, self.backup_dir)) # DEV EDIT SHOULD BE MV
os.system('dd if={}/{}/OP3T-Logo/LOGO of={}'.format(CONTRIB_THEMES, self.selected_theme, BOOT_LOGO_PATH))
print('Boot Logo installed successfully! Original backuped to ' + self.backup_dir)
if AUTO_INSTALL_CONF['install_anim']: # Auto BootAni Install Code
os.system('mount -o remount,rw /system')
os.system('mv /system/media/bootanimation.zip {}'.format(self.backup_dir))
os.system('cp {}/{}/bootanimation.zip /system/media'.format(CONTRIB_THEMES, self.selected_theme))
os.system('chmod 666 /system/media/bootanimation.zip')
print('Boot Logo installed successfully! Original backuped to {}'.format(self.backup_dir))
if AUTO_INSTALL_CONF['install_spinner']: # Auto OP Spinner Code
os.system('cp /data/{}/selfdrive/ui/spinner/spinner {}'.format(AUTO_INSTALL_CONF['openpilot_dir_name'], self.backup_dir)) # TEMP DEV EDIT SHOULD BE MV
os.system('cp {}/{}/spinner /data/{}/selfdrive/ui/spinner'.format(CONTRIB_THEMES, self.selected_theme, AUTO_INSTALL_CONF['openpilot_dir_name']))
print('OP Spinner installed successfully! Original backed up to {}'.format(self.backup_dir))
# if (autoInstallAdditional != 'no'): #Auto additional features Code (Not An Active feature)
# print('Additional Resources are not an active feature') # todo: refactor this
if __name__ == '__main__':
ti = ThemeInstaller()
avg_line_length: 58.990868 | max_line_length: 158 | alphanum_fraction: 0.562505
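`install_theme.py` drives every backup and flash step through `os.system` with paths interpolated into shell strings. A minimal sketch of the backup-then-`dd` step using `subprocess.run` with an argument list, which sidesteps shell quoting (illustrative only; the function and its arguments are my own, though the example device paths match the ones detected above):

```python
import shutil
import subprocess
from pathlib import Path


def backup_and_flash_logo(logo_file: str, block_device: str, backup_dir: str) -> None:
    """Copy the current logo partition into backup_dir, then write the new logo with dd.

    block_device would be e.g. /dev/block/sde17 (OnePlus) or
    /dev/block/bootdevice/by-name/splash (LeEco), as in the detection above.
    """
    backup_path = Path(backup_dir) / Path(block_device).name
    shutil.copyfile(block_device, backup_path)       # back up the raw partition contents
    subprocess.run(
        ["dd", f"if={logo_file}", f"of={block_device}"],
        check=True,                                  # fail loudly instead of silently continuing
    )
```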
hexsha: 42ca392345207157d4d67fa770a9b3e74cd76821 | size: 18,414 | ext: py | lang: Python
max_stars: src/img_dataset/ilsvrc2017_cls_multithread.py | wenxichen/tensorflow_yolo2 @ f040d9932816d8b2f8d7a67231060f0beea821d4 | ["MIT"] | count: 25 | min: 2017-05-15T08:44:26.000Z | max: 2019-09-05T05:23:59.000Z
max_issues: src/img_dataset/ilsvrc2017_cls_multithread.py | wenxichen/tensorflow_yolo2 @ f040d9932816d8b2f8d7a67231060f0beea821d4 | ["MIT"] | count: 5 | min: 2017-05-16T07:18:47.000Z | max: 2018-02-14T08:22:56.000Z
max_forks: src/img_dataset/ilsvrc2017_cls_multithread.py | wenxichen/tensorflow_yolo2 @ f040d9932816d8b2f8d7a67231060f0beea821d4 | ["MIT"] | count: 10 | min: 2017-07-03T13:27:27.000Z | max: 2018-11-21T13:10:16.000Z
content:
"""ILSVRC 2017 Classicifation Dataset.
"""
import os
import cv2
import math
import numpy as np
import random
import pickle
import xml.etree.ElementTree as ET
from tqdm import trange, tqdm
from multiprocessing import Process, Array, Queue
import config as cfg
class ilsvrc_cls:
def __init__(self, image_set, rebuild=False, data_aug=False,
multithread=False, batch_size=cfg.BATCH_SIZE,
image_size = cfg.IMAGE_SIZE, RGB=False):
self.name = 'ilsvrc_2017_cls'
self.devkit_path = cfg.ILSVRC_PATH
self.data_path = self.devkit_path
self.cache_path = cfg.CACHE_PATH
self.batch_size = batch_size
self.image_size = image_size
self.image_set = image_set
self.rebuild = rebuild
self.multithread = multithread
self.data_aug = data_aug
self.RGB = RGB
self.load_classes()
self.cursor = 0
self.epoch = 1
self.gt_labels = None
assert os.path.exists(self.devkit_path), \
'ILSVRC path does not exist: {}'.format(self.devkit_path)
assert os.path.exists(self.data_path), \
'Path does not exist: {}'.format(self.data_path)
self.prepare()
if self.multithread:
self.prepare_multithread()
self.get = self._get_multithread
else:
self.get = self._get
def prepare(self):
"""Create a list of ground truth that includes input path and label.
"""
# TODO: may still need to implement test
cache_file = os.path.join(
self.cache_path, 'ilsvrc_cls_' + self.image_set + '_gt_labels.pkl')
if os.path.isfile(cache_file) and not self.rebuild:
print('Loading gt_labels from: ' + cache_file)
with open(cache_file, 'rb') as f:
gt_labels = pickle.load(f)
print('{} {} dataset gt_labels loaded from {}'.
format(self.name, self.image_set, cache_file))
else:
if (self.image_set == "train"):
imgset_fname = "train_cls.txt"
else:
imgset_fname = self.image_set + ".txt"
imgset_file = os.path.join(
self.data_path, 'ImageSets', 'CLS-LOC', imgset_fname)
anno_dir = os.path.join(
self.data_path, 'Annotations', 'CLS-LOC', self.image_set)
print('Processing gt_labels using ' + imgset_file)
gt_labels = []
with open(imgset_file, 'r') as f:
for line in tqdm(f.readlines()):
img_path = line.strip().split()[0]
if (self.image_set == "train"):
label = self.class_to_ind[img_path.split("/")[0]]
else:
anno_file = os.path.join(anno_dir, img_path + '.xml')
tree = ET.parse(anno_file)
label = tree.find('object').find('name').text
label = self.class_to_ind[label]
imname = os.path.join(
self.data_path, 'Data', 'CLS-LOC', self.image_set, img_path + ".JPEG")
gt_labels.append(
{'imname': imname, 'label': label})
print('Saving gt_labels to: ' + cache_file)
with open(cache_file, 'wb') as f:
pickle.dump(gt_labels, f)
random.shuffle(gt_labels)
self.gt_labels = gt_labels
self.image_num = len(gt_labels)
self.total_batch = int(math.ceil(self.image_num / float(self.batch_size)))
def _get(self):
"""Get shuffled images and labels according to batchsize.
Return:
images: 4D numpy array
labels: 1D numpy array
"""
images = np.zeros(
(self.batch_size, self.image_size, self.image_size, 3))
labels = np.zeros(self.batch_size)
count = 0
while count < self.batch_size:
imname = self.gt_labels[self.cursor]['imname']
images[count, :, :, :] = self.image_read(
imname, data_aug=self.data_aug)
labels[count] = self.gt_labels[self.cursor]['label']
count += 1
self.cursor += 1
if self.cursor >= len(self.gt_labels):
random.shuffle(self.gt_labels)
self.cursor = 0
self.epoch += 1
return images, labels
def prepare_multithread(self):
"""Preperation for mutithread processing."""
self.reset = False
# num_batch_left should always be -1 until the last batch block of the epoch
self.num_batch_left = -1
self.num_child = 10
self.child_processes = [None] * self.num_child
self.batch_cursor_read = 0
self.batch_cursor_fetched = 0
# TODO: add this to cfg file
self.prefetch_size = 5 # in terms of batch
# TODO: may not need readed_batch after validating everything
self.read_batch_array_size = self.total_batch + self.prefetch_size * self.batch_size
self.readed_batch = Array('i', self.read_batch_array_size)
for i in range(self.read_batch_array_size):
self.readed_batch[i] = 0
self.prefetched_images = np.zeros((self.batch_size * self.prefetch_size
* self.num_child,
self.image_size, self.image_size, 3))
self.prefetched_labels = np.zeros(
(self.batch_size * self.prefetch_size * self.num_child))
self.queue_in = []
self.queue_out = []
for i in range(self.num_child):
self.queue_in.append(Queue())
self.queue_out.append(Queue())
self.start_process(i)
self.start_prefetch(i)
# fetch the first one
desc = 'receive the first half: ' + \
str(self.num_child * self.prefetch_size / 2) + ' batches'
for i in trange(self.num_child / 2, desc=desc):
# print "collecting", i
self.collect_prefetch(i)
def start_process(self, n):
"""Start multiprocessing prcess n."""
self.child_processes[n] = Process(target=self.prefetch,
args=(self.readed_batch,
self.queue_in[n],
self.queue_out[n]))
self.child_processes[n].start()
def start_prefetch(self, n):
"""Start prefetching in process n."""
self.queue_in[n].put([self.cursor + self.batch_size * n * self.prefetch_size,
self.batch_cursor_fetched + self.prefetch_size * n])
# maintain cusor and batch_cursor_fetched here
# so it is easier to syncronize between threads
if n == self.num_child - 1:
batch_block = self.prefetch_size * self.num_child
self.cursor += self.batch_size * batch_block
self.batch_cursor_fetched += batch_block
if self.total_batch <= self.batch_cursor_fetched + batch_block:
self.reset = True
self.num_batch_left = self.total_batch - self.batch_cursor_fetched
# print "batch_cursor_fetched:", self.batch_cursor_fetched
def start_prefetch_list(self, L):
"""Start multiple multiprocessing prefetches."""
for p in L:
self.start_prefetch(p)
def collect_prefetch(self, n):
"""Collect prefetched data, join the processes.
Join is not inculded because it seems faster to have
Queue.get() perform in clusters.
"""
images, labels = self.queue_out[n].get()
fetch_size = self.batch_size * self.prefetch_size
self.prefetched_images[n * fetch_size:(n + 1) * fetch_size] = images
self.prefetched_labels[n * fetch_size:(n + 1) * fetch_size] = labels
def collect_prefetch_list(self, L):
"""Collect and join a list of prefetcging processes."""
for p in L:
self.collect_prefetch(p)
def close_all_processes(self):
"""Empty and close all queues, then terminate all child processes."""
for i in range(self.num_child):
self.queue_in[i].cancel_join_thread()
self.queue_out[i].cancel_join_thread()
for i in range(self.num_child):
self.child_processes[i].terminate()
def load_classes(self):
"""Use the folder name to get labels."""
# TODO: double check if the classes are all the same as for train, test, val
img_folder = os.path.join(
self.data_path, 'Data', 'CLS-LOC', 'train')
print('Loading class info from ' + img_folder)
self.classes = [item for item in os.listdir(img_folder)
if os.path.isdir(os.path.join(img_folder, item))]
self.num_class = len(self.classes)
assert (self.num_class == 1000), "number of classes is not 1000!"
self.class_to_ind = dict(
list(zip(self.classes, list(range(self.num_class)))))
def _get_multithread(self):
"""Get in multithread mode.
Besides getting images and labels,
the function also manages start and end of child processes for prefetching data.
Return:
images: 4D numpy array
labels: 1D numpy array
"""
# print "num_batch_left:", self.num_batch_left
if self.reset:
print "one epoch is about to finish! reseting..."
self.collect_prefetch_list(
range(self.num_child / 2, self.num_child))
self.reset = False
elif self.num_batch_left == -1:
# run the child process
batch_block = self.prefetch_size * self.num_child
checker = (self.batch_cursor_read % batch_block) - 4
# print "checker:", checker
if checker % 5 == 0:
# print "about to start prefetch", checker / 5
self.start_prefetch(int(checker / 5))
if checker / 5 == self.num_child / 2 - 1:
self.collect_prefetch_list(
range(self.num_child / 2, self.num_child))
elif checker / 5 == self.num_child - 1:
self.collect_prefetch_list(range(self.num_child / 2))
assert (self.readed_batch[self.batch_cursor_read] == 1), \
"batch not prefetched!"
start_index = (self.batch_cursor_read
% (self.prefetch_size * self.num_child)) \
* self.batch_size
self.batch_cursor_read += 1
# print "batch_cursor_read:", self.batch_cursor_read
if self.num_batch_left == self.total_batch - self.batch_cursor_read:
# fetch and receive the last few batches of the epoch
L = range(int(math.ceil(self.num_batch_left /
float(self.prefetch_size))))
self.start_prefetch_list(L)
self.collect_prefetch_list(L)
# reset after one epoch
if self.batch_cursor_read == self.total_batch:
self.num_batch_left = -1
self.epoch += 1
self.cursor = 0
self.batch_cursor_read = 0
self.batch_cursor_fetched = 0
random.shuffle(self.gt_labels)
for i in range(self.read_batch_array_size):
self.readed_batch[i] = 0
print "######### reset, epoch", self.epoch, "start!########"
# prefill the fetch task for the new epoch
for i in range(self.num_child):
self.start_prefetch(i)
for i in range(self.num_child / 2):
self.collect_prefetch(i)
return (self.prefetched_images[start_index:start_index + self.batch_size],
self.prefetched_labels[start_index:start_index + self.batch_size])
def prefetch(self, readed_batch, q_in, q_out):
"""Prefetch data when task coming in from q_in
and sent out the images and labels from q_out.
Uses in multithread processing.
q_in send in [cursor, batch_cursor_fetched].
"""
fetch_size = self.batch_size * self.prefetch_size
while True:
cursor, batch_cursor_fetched = q_in.get()
images = np.zeros(
(fetch_size, self.image_size, self.image_size, 3))
labels = np.zeros(fetch_size)
count = 0
while count < fetch_size:
imname = self.gt_labels[cursor]['imname']
images[count, :, :, :] = self.image_read(
imname, data_aug=self.data_aug)
labels[count] = self.gt_labels[cursor]['label']
count += 1
cursor += 1
# to simplify the multithread reading
# the last batch will padded with the images
# from the beginning of the same list
if cursor >= len(self.gt_labels):
cursor = 0
for i in range(batch_cursor_fetched, batch_cursor_fetched + self.prefetch_size):
readed_batch[i] = 1
q_out.put([images, labels])
def image_read(self, imname, data_aug=False):
image = cv2.imread(imname)
if self.RGB:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#####################
# Data Augmentation #
#####################
if data_aug:
flip = bool(random.getrandbits(1))
rotate_deg = random.randint(0, 359)
# 75% chance to do random crop
# another 25% change in maintaining input at self.image_size
# this help simplify the input processing for test, val
# TODO: can make multiscale test input later
random_crop_chance = random.randint(0, 3)
too_small = False
color_pert = bool(random.getrandbits(1))
exposure_shift = bool(random.getrandbits(1))
if flip:
image = image[:, ::-1, :]
# assume color image
rows, cols, _ = image.shape
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate_deg, 1)
image = cv2.warpAffine(image, M, (cols, rows))
# color perturbation
if color_pert:
hue_shift_sign = bool(random.getrandbits(1))
hue_shift = random.randint(0, 10)
saturation_shift_sign = bool(random.getrandbits(1))
saturation_shift = random.randint(0, 10)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# TODO: currently not sure what cv2 does to values
# that are larger than the maximum.
# It seems it does not cut at the max
# nor normalize the whole by multiplying a factor.
# need to expore this in more detail
if hue_shift_sign:
hsv[:, :, 0] += hue_shift
else:
hsv[:, :, 0] -= hue_shift
if saturation_shift_sign:
hsv[:, :, 1] += saturation_shift
else:
hsv[:, :, 1] -= saturation_shift
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
if exposure_shift:
brighter = bool(random.getrandbits(1))
if brighter:
gamma = random.uniform(1, 2)
else:
gamma = random.uniform(0.5, 1)
image = ((image / 255.0) ** (1.0 / gamma)) * 255
# random crop
if random_crop_chance > 0:
# current random crop upbound is (1.3 x self.image_size)
short_side_len = random.randint(
self.image_size, cfg.RAND_CROP_UPBOUND)
short_side = min([cols, rows])
if short_side == cols:
scaled_cols = short_side_len
factor = float(short_side_len) / cols
scaled_rows = int(rows * factor)
else:
scaled_rows = short_side_len
factor = float(short_side_len) / rows
scaled_cols = int(cols * factor)
# print "scaled_cols and rows:", scaled_cols, scaled_rows
if scaled_cols < self.image_size or scaled_rows < self.image_size:
too_small = True
print "Image is too small,", imname
else:
image = cv2.resize(image, (scaled_cols, scaled_rows))
col_offset = random.randint(
0, scaled_cols - self.image_size)
row_offset = random.randint(
0, scaled_rows - self.image_size)
# print "col_offset and row_offset:", col_offset, row_offset
image = image[row_offset:self.image_size + row_offset,
col_offset:self.image_size + col_offset]
# print "image shape is", image.shape
if random_crop_chance == 0 or too_small:
image = cv2.resize(image, (self.image_size, self.image_size))
else:
image = cv2.resize(image, (self.image_size, self.image_size))
image = image.astype(np.float32)
image = (image / 255.0) * 2.0 - 1.0
return image
def save_synset_to_ilsvrcid_map(meta_file):
"""Create a mape from synset to ilsvrcid and save it as a pickle file.
"""
from scipy.io import loadmat
meta = loadmat(meta_file)
D = {}
for item in meta['synsets']:
D[str(item[0][1][0])] = item[0][0][0,0]
pickle_file = os.path.join(os.path.dirname(__file__), 'syn2ilsid_map.pickle')
with open(pickle_file, 'wb') as f:
pickle.dump(D, f)
def save_ilsvrcid_to_synset_map(meta_file):
"""Create a mape from ilsvrcid to synset and save it as a pickle file.
"""
from scipy.io import loadmat
meta = loadmat(meta_file)
D = {}
for item in meta['synsets']:
D[item[0][0][0,0]] = str(item[0][1][0])
pickle_file = os.path.join(os.path.dirname(__file__), 'ilsid2syn_map.pickle')
with open(pickle_file, 'wb') as f:
pickle.dump(D, f)
avg_line_length: 41.102679 | max_line_length: 94 | alphanum_fraction: 0.55876
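`ilsvrc_cls` above is Python 2 code (bare `print` statements, integer division feeding `range`) that prefetches batches in ten child processes wired together with paired `multiprocessing` Queues and a shared readiness `Array`. A stripped-down Python 3 sketch of the same producer/consumer prefetch idea, with a stand-in `load_batch` in place of the real `image_read`:

```python
import numpy as np
from multiprocessing import Process, Queue


def load_batch(batch_idx: int, batch_size: int, image_size: int):
    """Stand-in for the real image loading and label lookup."""
    images = np.zeros((batch_size, image_size, image_size, 3), dtype=np.float32)
    labels = np.zeros(batch_size, dtype=np.int64)
    return images, labels


def prefetch_worker(q_in: Queue, q_out: Queue, batch_size: int, image_size: int):
    """Child process: wait for a batch index on q_in, push the loaded batch to q_out."""
    while True:
        batch_idx = q_in.get()
        if batch_idx is None:          # sentinel: shut down cleanly
            break
        q_out.put(load_batch(batch_idx, batch_size, image_size))


if __name__ == "__main__":
    q_in, q_out = Queue(), Queue()
    worker = Process(target=prefetch_worker, args=(q_in, q_out, 32, 224))
    worker.start()

    q_in.put(0)                        # request a batch ahead of time...
    images, labels = q_out.get()       # ...and collect it when actually needed
    print(images.shape, labels.shape)

    q_in.put(None)                     # stop the worker
    worker.join()
```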
hexsha: 035e2405570fe68462bb1d7b170ecc190c49c8f3 | size: 5,766 | ext: py | lang: Python
max_stars: testfixtures/tests/test_twisted.py | abcdenis/testfixtures @ a02097aceea6ffb54de49869e9accc26190c5221 | ["MIT"] | count: 184 | min: 2015-03-18T09:43:35.000Z | max: 2021-08-20T08:22:07.000Z
max_issues: testfixtures/tests/test_twisted.py | abcdenis/testfixtures @ a02097aceea6ffb54de49869e9accc26190c5221 | ["MIT"] | count: 131 | min: 2015-09-15T15:06:51.000Z | max: 2021-08-24T06:54:01.000Z
max_forks: testfixtures/tests/test_twisted.py | abcdenis/testfixtures @ a02097aceea6ffb54de49869e9accc26190c5221 | ["MIT"] | count: 62 | min: 2015-06-11T20:42:36.000Z | max: 2021-08-21T01:01:28.000Z
content:
from twisted.logger import Logger, formatEvent
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from testfixtures import compare, ShouldRaise, StringComparison as S, ShouldAssert
from testfixtures.compat import PY3
from testfixtures.twisted import LogCapture, INFO
log = Logger()
class TestLogCapture(TestCase):
def test_simple(self):
capture = LogCapture.make(self)
log.info('er, {greeting}', greeting='hi')
capture.check((INFO, 'er, hi'))
def test_captured(self):
capture = LogCapture.make(self)
log.info('er, {greeting}', greeting='hi')
assert len(capture.events) == 1
compare(capture.events[0]['log_namespace'], expected='testfixtures.tests.test_twisted')
def test_fields(self):
capture = LogCapture.make(self, fields=('a', 'b'))
log.info('{a}, {b}', a=1, b=2)
log.info('{a}, {b}', a=3, b=4)
capture.check(
[1, 2],
[3, 4],
)
def test_field(self):
capture = LogCapture.make(self, fields=(formatEvent,))
log.info('er, {greeting}', greeting='hi')
capture.check('er, hi')
def test_check_failure_test_minimal(self):
capture = LogCapture.make(self)
try:
raise Exception('all gone wrong')
except:
log.failure('oh dear')
capture.check_failure_text('all gone wrong')
self.flushLoggedErrors()
def test_check_failure_test_maximal(self):
capture = LogCapture.make(self)
try:
raise TypeError('all gone wrong')
except:
log.failure('oh dear')
log.info("don't look at me...")
capture.check_failure_text(str(TypeError), index=0, attribute='type')
self.flushLoggedErrors()
self.flushLoggedErrors()
def test_raise_logged_failure(self):
capture = LogCapture.make(self)
try:
raise TypeError('all gone wrong')
except:
log.failure('oh dear')
with ShouldRaise(Failure) as s:
capture.raise_logged_failure()
compare(s.raised.value, expected=TypeError('all gone wrong'))
self.flushLoggedErrors()
def test_raise_later_logged_failure(self):
capture = LogCapture.make(self)
try:
raise ValueError('boom!')
except:
log.failure('oh dear')
try:
raise TypeError('all gone wrong')
except:
log.failure('what now?!')
with ShouldRaise(Failure) as s:
capture.raise_logged_failure(start_index=1)
compare(s.raised.value, expected=TypeError('all gone wrong'))
self.flushLoggedErrors()
def test_order_doesnt_matter_ok(self):
capture = LogCapture.make(self)
log.info('Failed to send BAR')
log.info('Sent FOO, length 1234')
log.info('Sent 1 Messages')
capture.check(
(INFO, S('Sent FOO, length \d+')),
(INFO, 'Failed to send BAR'),
(INFO, 'Sent 1 Messages'),
order_matters=False
)
def test_order_doesnt_matter_failure(self):
capture = LogCapture.make(self)
log.info('Failed to send BAR')
log.info('Sent FOO, length 1234')
log.info('Sent 1 Messages')
with ShouldAssert(
"entries not as expected:\n"
"\n"
"expected and found:\n"
"[(<LogLevel=info>, 'Failed to send BAR'), (<LogLevel=info>, 'Sent 1 Messages')]\n"
"\n"
"expected but not found:\n"
"[(<LogLevel=info>, <S:Sent FOO, length abc>)]\n"
"\n"
"other entries:\n"
"[(<LogLevel=info>, {}'Sent FOO, length 1234')]".format('' if PY3 else 'u')
):
capture.check(
(INFO, S('Sent FOO, length abc')),
(INFO, 'Failed to send BAR'),
(INFO, 'Sent 1 Messages'),
order_matters=False
)
def test_order_doesnt_matter_extra_in_expected(self):
capture = LogCapture.make(self)
log.info('Failed to send BAR')
log.info('Sent FOO, length 1234')
with ShouldAssert(
"entries not as expected:\n"
"\n"
"expected and found:\n"
"[(<LogLevel=info>, 'Failed to send BAR'),\n"
" (<LogLevel=info>, <S:Sent FOO, length 1234>)]\n"
"\n"
"expected but not found:\n"
"[(<LogLevel=info>, 'Sent 1 Messages')]\n"
"\n"
"other entries:\n"
"[]"
):
capture.check(
(INFO, S('Sent FOO, length 1234')),
(INFO, 'Failed to send BAR'),
(INFO, 'Sent 1 Messages'),
order_matters=False
)
def test_order_doesnt_matter_extra_in_actual(self):
capture = LogCapture.make(self)
log.info('Failed to send BAR')
log.info('Sent FOO, length 1234')
log.info('Sent 1 Messages')
with ShouldAssert(
"entries not as expected:\n"
"\n"
"expected and found:\n"
"[(<LogLevel=info>, 'Failed to send BAR'), (<LogLevel=info>, 'Sent 1 Messages')]\n"
"\n"
"expected but not found:\n"
"[(<LogLevel=info>, <S:Sent FOO, length abc>)]\n"
"\n"
"other entries:\n"
"[(<LogLevel=info>, {}'Sent FOO, length 1234')]".format('' if PY3 else 'u')
):
capture.check(
(INFO, S('Sent FOO, length abc')),
(INFO, 'Failed to send BAR'),
(INFO, 'Sent 1 Messages'),
order_matters=False
)
avg_line_length: 34.321429 | max_line_length: 95 | alphanum_fraction: 0.544918
hexsha: ac44819f260e908003c7308c02700da36391bdbc | size: 22,305 | ext: py | lang: Python
max_stars: Fund_Manager_v2.py | joselynzhao/Fund_Manager @ 2e207a4ed1f1cc482e1026ae7bb5dbd9e5bf43e8 | ["MIT"] | count: 1 | min: 2021-05-28T05:20:42.000Z | max: 2021-05-28T05:20:42.000Z
max_issues: Fund_Manager_v2.py | joselynzhao/Fund_Manager @ 2e207a4ed1f1cc482e1026ae7bb5dbd9e5bf43e8 | ["MIT"] | count: null | min: null | max: null
max_forks: Fund_Manager_v2.py | joselynzhao/Fund_Manager @ 2e207a4ed1f1cc482e1026ae7bb5dbd9e5bf43e8 | ["MIT"] | count: null | min: null | max: null
content:
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2021/5/24 下午7:25
# @Author : Joselynzhao
# @Email : zhaojing17@forxmail.com
# @File : Fund_Manager_v2.py
# @Software: PyCharm
# @Desc :
import requests
import random
import time
import pandas as pd
import execjs
import json
from tqdm import tqdm
import math
import numpy as np
from Score_Computer import *
from Codes_infor import *
import datetime
# http://fund.eastmoney.com/005296.html
# url1 = 'https://push2.eastmoney.com/api/qt/ulist.np/get?fltt=2&secids=1.000001,0.399001&invt=2&fields=f2,f3,f4,f6,f12,f104,f105,f106&ut=267f9ad526dbe6b0262ab19316f5a25b&cb=jQuery18307458225946461923_1621588092653&_=1621588092705'
'''
0: SSE Composite Index (上证指数); f104: rising/up (涨)
'''
class Fund_manager():
def __init__(self):
t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
date = t.split(' ')[0]
self.date = date
# self.care_list = care_list
self.headers = {'User-Agent': random.choice(user_agent_list), 'Referer': referer_list[0]} # 每次运行的时候都会重新生成头部
def Runscore(self, input_file_name): # 制定跑哪几列分数
codes_infor = pd.read_excel(f'{input_file_name}.xlsx', dtype=str) # 全部读取为字符串格式
query_codes = codes_infor['code']
query_type = codes_infor['type']
# dataInfors = codes_infor #用codeInfor来初始化
get_Infors = pd.DataFrame()
for i in tqdm(range(len(query_codes))):
code = query_codes[i]
FR = pd.Series(self.getFourRank(code))
TTJJ = pd.Series(self.getWorth_infor1(code)) # 返回很多信息
THS = pd.Series(self.getInfo_fromApp(code))
Water = pd.Series(self.getWaterLevel(code))
# RDZF = pd.Series(self.getRZDF(code))
# 处理获得的数据
# FR 和同花顺也不需要处理 全选
TTJJ = TTJJ[['code', 'name', 'asset', 'clrq', 'levelOfRisk', 'manager', 'maxStar', 'totalnet1',
'orgname', 'week', 'month', 'tmonth', 'hyear', 'year', 'tyear']]
code_infor = pd.DataFrame()
code_infor = pd.concat([code_infor, FR, TTJJ, THS, Water], axis=0)
# code_infor.loc['Source'] = col_code_name
code_infor.columns = [code]
# get_Infors = get_Infors.append(code_infor,ignore_index=True)
get_Infors = pd.concat([get_Infors, code_infor], axis=1)
# 一次行抓取最后一部分数据
get_Infors = get_Infors.transpose()
codes_infor.set_index(['code'], inplace=True)
# query_list = list(get_Infors['code'])
# print(query_list)
# value_infor = pd.DataFrame(self.getWorth_Valuation_forList(query_codes))
# value_infor = value_infor.transpose()
# value_infor = value_infor[['preprice', 'price', 'priceRate', 'rate','net']]
get_Infors = pd.concat([codes_infor, get_Infors], axis=1, join='outer')
# cols = ['type', 'FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth',
# 'FR_month', 'FR_week', 'code', 'name', 'asset', 'clrq', 'levelOfRisk', 'manager', 'maxStar',
# 'totalnet1', 'orgname', 'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear', 'startZF',
# 'JJType', 'manageHBL', 'startManage', 'manageDay', 'workDay',
# 'yearHBL', 'JLlevel', ]
get_Infors = get_Infors[[
'code','type', 'name', 'JJType', 'maxStar', 'clrq', 'levelOfRisk', 'manager', 'JLlevel', 'orgname',
'GPstock',
'FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth', 'FR_month',
'FR_week',
'asset', 'totalnet1', 'DWJZ', 'HC', 'XP', 'BD',
'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear', 'startZF',
'manageHBL', 'yearHBL',
'manageDay', 'workDay',
'Hwater', 'water',
]]
print(get_Infors.columns.tolist())
# 获取数据评分处理
comp_in = get_Infors[
['FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth', 'FR_month',
'FR_week',
'asset', 'totalnet1', 'DWJZ', 'HC', 'XP', 'BD',
'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear', 'startZF',
'manageHBL', 'yearHBL', 'manageDay', 'workDay']].apply(pd.to_numeric, errors='ignore')
# raw_data = pd.read_excel(f'Infors/信息汇总_{self.date}_{input_file_name}.xlsx')
comp_out = self.getFundScore(comp_in)
# code_list = get_Infors['code']
# comp_out['code'] = code_list
comp_out.set_index(get_Infors['code'], inplace=True)
# makeRound2_list = ['S_JL','S_zq','S_zd','S_zs']
# def keepFloat2(one):
# return round(one, 2)
# for name in makeRound2_list:
# # comp_out[name] = pd.Series(map(keepFloat2, comp_out[name]))
# comp_out[name] = round(comp_out[name],2)
# # comp_out[name] = pd.Series(map(lambda x: '%.3f' % x,comp_out[name]))
# 写入数据到文件
get_Infors = pd.concat([get_Infors, comp_out], axis=1)
try:
get_Infors.to_excel(f'Infors/信息汇总_{self.date}_{input_file_name}.xlsx', '信息', index=None, encoding='utf-8')
except Exception as e:
print(e)
# ALL_INFOR = get_Infors[[
# 'type', 'code', 'name','JJType','maxStar','clrq', 'levelOfRisk', 'manager','JLlevel', 'orgname','GPstock',
# 'FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth', 'FR_month', 'FR_week',
# 'asset', 'totalnet1','DWJZ', 'HC', 'XP', 'BD',
# 'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear', 'startZF',
# 'manageHBL', 'yearHBL',
# 'manageDay', 'workDay',
# 'S_JL', 'S_zq', 'S_zd', 'S_zs',
# 'stand_1','stand_2','STABLE_1','STABLE_2','EXCITED_1','EXCITED_2'
# ]]
# get_Infors = pd.read_excel(f'Infors/信息汇总_{self.date}_{input_file_name}.xlsx')
SCORE = get_Infors[[
'code','type', 'name', 'JJType', 'maxStar', 'GPstock', 'manager', 'JLlevel', 'S_JL',
'totalnet1', 'DWJZ', 'HC', 'XP', 'BD',
'STB_up','EXC_~', 'Hwater', 'water',
'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear',
]]
# 主要是根据根数来看操作
# OP_In = get_Infors[['JJType', ]]
# OP = self.getOP()
try:
SCORE.to_excel(f'Scores/汇总_{self.date}_{input_file_name}.xlsx', '信息', index=None, encoding='utf-8')
except Exception as e:
print(e)
def SCORE(self,input_file_name):
get_Infors = pd.read_excel(f'Infors/信息汇总_{self.date}_{input_file_name}.xlsx')
SCORE = get_Infors[[
'code', 'type', 'name', 'JJType', 'maxStar', 'GPstock', 'manager', 'JLlevel', 'S_JL',
'totalnet1', 'DWJZ', 'HC', 'XP', 'BD',
'STB_up', 'EXC_~', 'Hwater', 'water',
'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'fyear',
]]
# 主要是根据根数来看操作
# OP_In = get_Infors[['JJType', ]]
# OP = self.getOP()
try:
SCORE.to_excel(f'Scores/汇总_{self.date}_{input_file_name}.xlsx', '信息', index=None, encoding='utf-8')
except Exception as e:
print(e)
# 试着进行操作指示
def getOP(self,df):
pass
# print(0)
def getRZDF(self, fscode):
# 'http://gz-fund.10jqka.com.cn/?module=api&controller=index&action=chart&info=vm_fd_163406&start=0930&_=1621995244528'
# 'http://gz-fund.10jqka.com.cn/?module=api&controller=index&action=chart&info=vm_fd_JSH108&start=0930&_=1621994890153'
url = 'http://gz-fund.10jqka.com.cn/?module=api&controller=index&action=chart&info=vm_fd_' + fscode + '&start=0930'
content = requests.get(url, headers=self.headers).text # str类型
try:
_, baseline, raw_data = content.split('~')
except Exception as e:
print(e)
return {'RZDF': -1000}
try:
baseline = float(baseline)
except Exception as e:
print(e)
return {'RZDF': -1000}
one_data = raw_data.split(';')[-1]
time, end_data, _, _ = one_data.split(',')
end_data = float(end_data)
return {'RZDF': round((end_data - baseline) * 100 / (baseline), 2)}
def getFourRank(self, fscode): # 获得四分位排名
url = 'http://fund.10jqka.com.cn/ifindRank/quarter_year_' + fscode + '.json'
content = requests.get(url, headers=self.headers).text # str类型
try:
jscontent = json.loads(content)
except Exception as e:
print(e)
results = {}
try:
rawdata = jscontent['nowCommonTypeRank']
except Exception as e:
print(e)
return results
title = ['fyear', 'tyear', 'twoyear', 'year', 'nowyear', 'hyear', 'tmonth', 'month', 'week']
for name in title:
try:
results['FR_' + name] = float(rawdata[name][2])
except Exception as e:
results['FR_' + name] = math.nan
return results
def getDWJZ(self, fscode): # 单位净值数据
url = 'http://fund.10jqka.com.cn/163406/json/jsondwjz.json' # 单位净值数据、
# url = 'http://fund.10jqka.com.cn/163406/json/jsonljjz.json' # 累计净值数据、
# url = 'http://fund.10jqka.com.cn/163406/json/jsonfqjz.json' # 收益
try:
url = 'http://fund.10jqka.com.cn/ifindRank/quarter_year_' + fscode + '.json'
except Exception as e:
print(e)
code = str(fscode) # 转换为str格式
url = 'http://fund.10jqka.com.cn/ifindRank/quarter_year_' + code + '.json'
def getWaterLevel(self, fscode):
url = 'http://fund.10jqka.com.cn/' + fscode + '/json/jsonljjz.json'
content = requests.get(url, headers=self.headers).text # str类型
try:
raw_data = content.split('=')[1]
except Exception as e:
print(e)
return {'Hwater': '000 ', 'water': '000'}
raw_data = json.loads(raw_data)
raw_data = pd.DataFrame(raw_data)
try:
raw_data.set_index([0], inplace=True)
except Exception as e:
print(e)
return {'Hwater': '000 ', 'water': '000'}
# if len(raw_data)>=400:
# raw_data = raw_data[len(raw_data)-400:]
def getday(y, m, d, n):
the_date = datetime.datetime(y, m, d)
result_date = the_date + datetime.timedelta(days=n)
d = result_date.strftime('%Y-%m-%d')
d = ''.join(d.split('-'))
return d
year, month, day = self.date.split('-')
dates = raw_data.index.tolist()
today_subDay = 0
today = year + month + day
while (today not in dates):
today_subDay -= 1
today = getday(int(year), int(month), int(day), today_subDay)
year_subDay = -366
year_date = getday(int(year), int(month), int(day), year_subDay)
if int(year_date) - int(dates[0]) > 6:
while (year_date not in dates):
year_subDay -= 1
year_date = getday(int(year), int(month), int(day), year_subDay)
index_year = dates.index(year_date)
year_datas = raw_data[index_year:]
year_max_value = float(max(year_datas[1]))
year_base_value = float(year_datas.loc[year_date])
today_value = float(year_datas.loc[today])
if year_max_value !=year_base_value:
# hyear_max_index = hyear_datas.index(hyear_max_value)
year_water = (today_value - year_base_value) / (year_max_value - year_base_value)
else:
year_min_value = float(min(year_datas[1]))
year_water = (today_value-year_min_value)/(year_base_value-year_min_value)
year_water = round(year_water, 4)
else:
year_water = -1000
hyear_subDay = -183
hyear_date = getday(int(year), int(month), int(day), hyear_subDay)
if int(hyear_date) - int(dates[0]) > 6:
while (hyear_date not in dates):
hyear_subDay -= 1
hyear_date = getday(int(year), int(month), int(day), hyear_subDay)
index_hyear = dates.index(hyear_date)
hyear_datas = raw_data[index_hyear:]
hyear_max_value = float(max(hyear_datas[1]))
hyear_base_value = float(hyear_datas.loc[hyear_date])
today_value = float(hyear_datas.loc[today])
if hyear_max_value != hyear_base_value:
# hyear_max_index = hyear_datas.index(hyear_max_value)
hyear_water = (today_value - hyear_base_value) / (hyear_max_value - hyear_base_value)
else:
hyear_min_value = float(min(hyear_datas[1]))
hyear_water = (today_value-hyear_min_value)/(hyear_base_value-hyear_min_value)
hyear_water = round(hyear_water, 4)
else:
hyear_water = -1000
return {'Hwater': hyear_water, 'water': year_water}
def getWorth_infor1(self, fscode): # 根据Url返回请的文本,格式是字典类型。
# 同花顺上的单只基金页面信息
url = 'http://fund.10jqka.com.cn/data/client/myfund/' + fscode
content = requests.get(url, headers=self.headers).text # str类型
jscontent = json.loads(content)
rawdata = jscontent['data'][0]
return rawdata
def getWorth_Valuation_forList(self, query_list): # 获取care_list的估值信息,格式是字典类型。
# 基金估值、获取基金当日涨幅情况
url = 'http://fund.ijijin.cn/data/Net/gz/all_priceRate_desc_0_0_1_9999_0_0_0_jsonp_g.html'
content = requests.get(url, headers=self.headers).text # str类型
# 提取文本有效信息
content_ = content[2:-1]
jscontent = json.loads(content_)
rawdata = jscontent['data'] #
result = {}
# print(f"正在为{self.care_list['name']}爬取第二部分数据……")
for i in tqdm(range(len(query_list))):
fscode = query_list[i]
# for fscode in self.care_list:
key = 'f' + fscode
try:
result[fscode] = rawdata[key]
except Exception as e:
print(e)
# 对异常情况的处理
result[fscode] = {}
return result
def getInfo_fromApp(self, fscode): # 从天天基金的app上抓取的 返回字典
# url = 'https://j5.dfcfw.com/sc/tfs/qt/v2.0.1/110003.json?rand=1621596866760'
url = 'https://j5.dfcfw.com/sc/tfs/qt/v2.0.1/' + fscode + '.json'
content = requests.get(url, headers=self.headers).text
data1 = json.loads(content) # str类型
All_INFO = {}
JJXQ = data1["JJXQ"]["Datas"]
JJJL = data1['JJJL']["Datas"][0]
JJJLNEW = data1['JJJLNEW']["Datas"][0]
JJCC = data1['JJCC']["Datas"]
JDZF = data1['JDZF']["Datas"]
# All_INFO['产品特色']=JJXQ['COMMENTS']
# All_INFO['单位净值']=JJXQ['DWJZ']
# All_INFO['成立日期']=JJXQ['ESTABDATE']
# All_INFO['基金规模']=JJXQ['ENDNAV']
# All_INFO['日涨幅(%)']=JJXQ['RZDF']
# All_INFO['累计净值']=JJXQ['LJJZ']
# All_INFO['近1周涨幅(%)']=JDZF[0]['syl']
# All_INFO['近1月涨幅(%)']=JDZF[1]['syl']
# All_INFO['近3月涨幅(%)']=JDZF[2]['syl']
# All_INFO['近6月涨幅(%)']=JDZF[3]['syl']
# All_INFO['近1年涨幅(%)']=JDZF[4]['syl']
# All_INFO['近2年涨幅(%)']=JDZF[5]['syl']
# All_INFO['近3年涨幅(%)']=JDZF[6]['syl']
All_INFO['fyear'] = JDZF[7]['syl']
All_INFO['startZF'] = JDZF[9]['syl']
All_INFO['JJType'] = JJXQ['FTYPE'] # 股票指数
All_INFO['HC'] = JJXQ['MAXRETRA1']
All_INFO['XP'] = JJXQ['SHARP1']
All_INFO['BD'] = JJXQ['STDDEV1']
# All_INFO['RZDF'] = JJXQ['RZDF']
All_INFO['DWJZ'] = JJXQ['DWJZ']
All_INFO['Today'] = JJXQ['FSRQ']
All_INFO['RISKLEVEL'] = JJXQ['RISKLEVEL']
# All_INFO['基金经理']=JJJL['MGRNAME']
All_INFO['manageHBL'] = JJJL['PENAVGROWTH']
All_INFO['startManage'] = JJJL['FEMPDATE']
All_INFO['manageDay'] = JJJL['DAYS']
All_INFO['workDay'] = JJJLNEW['MANGER'][0]['TOTALDAYS']
All_INFO['yearHBL'] = JJJLNEW['MANGER'][0]['YIELDSE']
All_INFO['JLlevel'] = JJJLNEW['MANGER'][0]['HJ_JN']
# All_INFO['定投近1年收益(%)']=JJXQ['PTDT_Y'] #暂时把定投数据关闭
# All_INFO['定投近2年收益(%)']=JJXQ['PTDT_TWY']
# All_INFO['定投近3年收益(%)']=JJXQ['PTDT_TRY']
try:
All_INFO['GPstock'] = JJCC['InverstPosition']['fundStocks'][0]['GPJC'] + ',' + \
JJCC['InverstPosition']['fundStocks'][1]['GPJC'] + ',' + \
JJCC['InverstPosition']['fundStocks'][2]['GPJC']
except Exception as e:
print(e)
All_INFO['GPstock'] = ''
# df = pd.DataFrame(All_INFO)
# print(0)
return All_INFO
def getManagerScore(self, df): # 单独把基金经理的水平算出来
data = df
comp = pd.DataFrame()
comp['manageHBL'] = pd.Series(map(startHBLScore, data['manageHBL']))
comp['workDay'] = pd.Series(map(workdaysScore, data['workDay']))
comp['yearHBL'] = pd.Series(map(yearHBLScore, data['yearHBL']))
# sum score
comp['score'] = (comp['manageHBL'] + comp['workDay'] + comp['yearHBL']) / 3
return comp['score']
def StartFund_select(self, df): # 通过硬性指标筛选好基金。
data = df
comp = pd.DataFrame()
def Stand_1(size, tyearZF, HC, XP, BD, FR_tyear, FR_year, FR_hyear):
if isGood(size, 1, 20) and isGood(tyearZF, 1, 80) and isGood(HC, 0, 25) and isGood(
XP, 1, 1.5) and isGood(BD, 0, 30) and isGood(FR_tyear, 1, 80) and isGood(FR_year, 1, 80) and isGood(
FR_hyear, 1, 60):
return 1
else:
return 0
def Stand_2(size, tyearZF, HC, XP, FR_year):
if isGood(size, 1, 20) and isGood(tyearZF, 1, 80) and isGood(HC, 0,
25) and isGood(
XP, 1, 1.5) and isGood(FR_year, 1, 80):
return 1
else:
return 0
def STABLE_1(HC, BD):  # With volatility and drawdown kept stable, the bigger the gain the better.
if isGood(HC, 0, 10) and isGood(BD, 0, 20):
return 1
else:
return 0
def STABLE_2(HC, BD):  # With volatility and drawdown kept stable, the bigger the gain the better.
if isGood(HC, 0, 20) and isGood(BD, 0, 25):
return 1
else:
return 0
def EXCITED_1(HC, BD, tyearZF):  # aggressive
if isGood(HC, 1, 25) and isGood(BD, 1, 30) and isGood(tyearZF, 1, 150):
return 1
else:
return 0
def EXCITED_2(HC, BD, tyearZF):  # aggressive
if isGood(HC, 1, 20) and isGood(BD, 1, 25) and isGood(tyearZF, 1, 100):
return 1
else:
return 0
comp['STA_1'] = pd.Series(
map(Stand_1, data['asset'], data['tyear'], data['HC'], data['XP'], data['BD'],
data['FR_tyear'], data['FR_year'], data['FR_hyear']))
comp['STA_2'] = pd.Series(
map(Stand_2, data['asset'], data['tyear'], data['HC'], data['XP'],
data['FR_year']))
comp['STB_1'] = pd.Series(map(STABLE_1, data['HC'], data['BD']))
comp['STB_2'] = pd.Series(map(STABLE_2, data['HC'], data['BD']))
comp['EXC_1'] = pd.Series(map(EXCITED_1, data['HC'], data['BD'], data['tyear']))
comp['EXC_2'] = pd.Series(map(EXCITED_2, data['HC'], data['BD'], data['tyear']))
return comp
def getFundScore(self, raw_data):  # Compute the fund's own metric scores.
data = raw_data
FR = pd.DataFrame()
FR_list = ['FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth', 'FR_month',
'FR_week']
FR['score'] = pd.Series(np.zeros(len(data)))
for name in FR_list:
FR[name] = pd.Series(map(FRScore, data[name]))
FR['score'] = FR['score'] + FR[name]
FR['score'] = FR['score'] / len(FR_list)
comp = pd.DataFrame()
comp['tyearZF'] = pd.Series(map(tyearZFScore, data['tyear']))
comp['HC'] = pd.Series(map(HC_justScore, data['tyear'], data['HC']))  # calibrated drawdown score
comp['XP'] = pd.Series(map(XPScore, data['XP']))
comp['BD'] = pd.Series(map(BDScore, data['BD']))
comp['FR'] = FR['score']
comp['HC_stan'] = pd.Series(map(HCScore, data['HC']))  # standard (uncalibrated) drawdown score
compOut = pd.DataFrame()
compOut['S_JL'] = round(pd.Series(self.getManagerScore(raw_data)), 2)
# compOut['S_zq'] = round(
# (comp['XP'] + comp['HC'] * 2 + comp['BD'] + comp['FR'] + compOut['S_JL'] + comp['tyearZF']) / 7, 2)
# compOut['S_zd'] = round(
# (comp['XP'] * 2 - comp['HC'] - comp['BD'] + comp['FR'] + compOut['S_JL'] + comp['tyearZF']) / 3, 2)
# compOut['S_zs'] = round((comp['XP']*2 + comp['HC'] + comp['FR'] + comp['tyearZF']) / 4, 2)
compOut['STB_up'] = round((comp['HC_stan']*3 +comp['BD']*2+comp['XP']*2+compOut['S_JL'])/8,2)
compOut['EXC_~'] = round((comp['XP']*2-comp['HC_stan']-comp['BD']+compOut['S_JL'])/1,2)
# compOut = pd.concat([compOut, self.StartFund_select(raw_data)], axis=1, join="outer")
# print(data.columns.tolist())
# colll = ['FR_fyear', 'FR_tyear', 'FR_twoyear', 'FR_year', 'FR_nowyear', 'FR_hyear', 'FR_tmonth', 'FR_month',
# 'FR_week', 'code', 'name', 'asset', 'clrq', 'levelOfRisk', 'manager', 'maxStar', 'net', 'totalnet1',
# 'orgname', 'week', 'month', 'tmonth', 'hyear', 'year', 'tyear', 'JJType', 'fyearZF', 'startZF', 'HC',
# 'XP', 'BD', 'manageHBL', 'startManage', 'manageDay', 'workDay', 'yearHBL', 'JLlevel', 'GPstock',
# 'Source', 'date', 'preprice', 'price', 'priceRate', 'rate', 'S_JL', 'S_zq', 'S_zd', 'S_zs', 'stand_1',
# 'stand_2', 'STABLE_1', 'STABLE_2', 'EXCITED_1', 'EXCITED_2']
return compOut
# print(0)
if __name__ == "__main__":
FM = Fund_manager()
FM.SCORE('zj_codes')
# print(FM.getRZDF('163406'))
| 44.61
| 231
| 0.551446
|
331da8a476d0a4d80a974336c1a4e0d6e78dbabd
| 230
|
py
|
Python
|
train_pipeline.py
|
mbranbilla/creditas-challenge
|
f28d466fd207008365fe8c2e0e64bfbdaddbd04b
|
[
"MIT"
] | 1
|
2021-01-20T19:05:18.000Z
|
2021-01-20T19:05:18.000Z
|
train_pipeline.py
|
mbranbilla/creditas-challenge
|
f28d466fd207008365fe8c2e0e64bfbdaddbd04b
|
[
"MIT"
] | null | null | null |
train_pipeline.py
|
mbranbilla/creditas-challenge
|
f28d466fd207008365fe8c2e0e64bfbdaddbd04b
|
[
"MIT"
] | null | null | null |
import os
os.system("python3 scripts/load_data.py")
os.system("python3 scripts/load_data.py")
os.system("python3 scripts/preprocessing.py")
os.system("python3 scripts/feature_generation.py")
os.system("python3 scripts/model.py")
| 28.75
| 50
| 0.791304
|
c03d8e037aefb572d32e100891a1824750a6685c
| 31
|
py
|
Python
|
Bronze/Bronze_V/24078.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
Bronze/Bronze_V/24078.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
Bronze/Bronze_V/24078.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
X = int(input())
print(X % 21)
| 10.333333
| 16
| 0.548387
|
8ffecb3b03d5a70c611d3716a07beb6206fe004e
| 17,342
|
py
|
Python
|
nerdlandbot/commands/GuildData.py
|
woutermeuwis/nerdlandbot
|
efd301ed6cb4c11e4d3ce851f328bce02ca00617
|
[
"MIT"
] | 5
|
2020-05-21T11:52:57.000Z
|
2020-05-23T21:55:37.000Z
|
nerdlandbot/commands/GuildData.py
|
woutermeuwis/nerdlandbot
|
efd301ed6cb4c11e4d3ce851f328bce02ca00617
|
[
"MIT"
] | 7
|
2020-05-21T12:12:14.000Z
|
2020-05-24T16:15:37.000Z
|
nerdlandbot/commands/GuildData.py
|
woutermeuwis/nerdlandbot
|
efd301ed6cb4c11e4d3ce851f328bce02ca00617
|
[
"MIT"
] | null | null | null |
import json
from os import path, listdir, makedirs
from typing import List, Optional
from discord import Member
from datetime import datetime
from nerdlandbot.helpers.constants import DEFAULT_MEMBER_NOTIFICATION_NUMBER
_configFolder = "GuildConfigs"
_guildConfigCache = dict()
if not path.exists(_configFolder):
makedirs(_configFolder)
class GuildData:
bot_admins: list
guild_id: int
notification_lists: dict
youtube_channels: dict
purgers: dict
culture: str
pets: dict
pets_last_id: Optional[int]
pets_categories: List[str]
mod_channel: str
church_channel: int
church_event: list
member_notification_number: int
def __init__(self, guild_id: int):
self.guild_id = guild_id
self.notification_lists = dict()
self.youtube_channels = dict()
self.purgers = dict()
self.bot_admins = []
self.culture = "en"
self.pets = {}
self.pets_last_id = None
self.pets_categories = []
self.mod_channel = None
self.church_channel = None
self.church_event = []
self.member_notification_number = DEFAULT_MEMBER_NOTIFICATION_NUMBER
async def sub_user(self, list_name: str, user_id: int) -> bool:
"""
Adds a user to the list if not already there.
:param list_name: The list to add the user to. (str)
:param user_id: The user to add to the list. (int)
:return: True if added successfully, False if already in list. (bool)
"""
user_list = self.notification_lists[list_name]["users"]
if user_id not in user_list:
# user not in list, add to list and return True
user_list.append(user_id)
await self.save()
return True
else:
# user already in list, return false
return False
async def unsub_user(self, list_name: str, user_id: int) -> bool:
"""
Removes a user from the list
:param list_name: The list to remove the user from. (str)
:param user_id: The user to remove from the list. (str)
:returns: True if the user is successfully removed, False if the user is not on the list. (bool)
"""
user_list = self.notification_lists[list_name]["users"]
if user_id in user_list:
# user exists in list, remove and return True
user_list.remove(user_id)
await self.save()
return True
else:
# user does not exist in list, return False
return False
def get_users_list(self, list_name: str) -> List[str]:
"""
Return all users who subscribed to the given list
:param list_name: The list to fetch. (str)
:return: A list with the id of all users who subscribed to the given list. (list[str])
"""
return self.notification_lists[list_name]["users"]
def get_emoji(self, list_name: str) -> (str, bool):
"""
Return the emoji for the given list
:param list_name: The list to fetch. (str)
:return: the emoji to use (str), if the emoji is a custom emoji(bool)
"""
return self.notification_lists[list_name]["emoji"], self.notification_lists[list_name]["is_custom_emoji"]
def does_list_exist(self, list_name: str) -> bool:
"""
Checks whether or not a list exists.
:param list_name: The name of the list to check. (str)
:return: True if the list exists, False if it doesn't. (bool)
"""
return list_name in self.notification_lists.keys()
async def add_notification_list(self, list_name: str, emoji, custom_emoji: bool):
"""
Adds a new notification list.
:param list_name: The name of the list to add. (str)
:param emoji: The emoji to be used for the list. (any)
:param custom_emoji: Whether or not we're using a custom emoji. (bool)
"""
self.notification_lists[list_name] = {
"emoji": emoji,
"is_custom_emoji": custom_emoji,
"users": [],
"created_on": datetime.now().isoformat(),
"notified_on": []
}
await self.save()
async def remove_notification_list(self, list_name: str):
"""
Removes a notification list.
:param list_name: The list to be removed. (str)
"""
if list_name in self.notification_lists.keys():
del self.notification_lists[list_name]
await self.save()
async def save(self):
"""
Saves the current data to storage
"""
await self.__write_file()
async def __write_file(self):
"""
Write data to file
"""
# TODO: Actually make this async
with open(get_config_file_path(self.guild_id), "w+") as config:
json.dump(self.__dict__, config, indent=4, sort_keys=True)
async def add_admin(self, user_id: int):
"""
Add a new bot admin
:param user_id: The id of the user to promote to admin (int)
"""
if user_id not in self.bot_admins:
self.bot_admins.append(user_id)
await self.save()
async def remove_admin(self, user_id: int):
"""
Removes a bot admin
:param user_id: The id of the user to revoke bot admin rights from. (int)
"""
if user_id in self.bot_admins:
self.bot_admins.remove(user_id)
await self.save()
def user_is_admin(self, user_to_check: Member):
"""
Checks whether or not a user is a bot admin.
:param user_to_check: The user to check (discord.Member)
:return: True if the user is either a bot admin or a server admin, False if the user is neither (bool)
"""
# returns True if the user is a server admin or bot admin
# returns False if the user is neither a server admin or a bot admin
return (
user_to_check.guild_permissions.administrator
or user_to_check.id in self.bot_admins
)
def user_is_admin_moderator(self, user_to_check: Member):
"""
Checks whether or not a user is a bot admin or a moderator.
:param user_to_check: The user to check (discord.Member)
:return: True if the user is either a bot admin or a server moderator, False if the user is neither (bool)
"""
# Checks whether the user is moderator by checking the 'Ban Members permission'
return (
user_to_check.guild_permissions.administrator
or user_to_check.guild_permissions.ban_members
or user_to_check.id in self.bot_admins
)
async def update_language(self, language: str):
"""
Updates the language and saves the guild
:param language: The new language. (str)
"""
if language != self.culture:
self.culture = language
await self.save()
async def add_youtube_channel(
self,
youtube_channel_id: str,
text_channel: str,
latest_video_id: Optional[str] = None,
) -> bool:
"""
Adds a youtube channel if not already there.
:param youtube_channel_id: The Youtube channel to be notified for (str)
:param text_channel: The text channel that will receive the notification (str)
:param latest_video_id: ID of the latest video (optional - str - default = None)
:return: True if added successfully, False if already in list. (bool)
"""
if youtube_channel_id not in self.youtube_channels.keys():
# youtube channel not in list, add to list and return True
self.youtube_channels[youtube_channel_id] = {
"latest_video_id": latest_video_id,
"text_channel_id": text_channel.id,
}
await self.save()
return True
else:
# youtube channel already in list, return false
return False
async def remove_youtube_channel(self, youtube_channel_id: str) -> bool:
"""
Remove a youtube channel
:param youtube_channel_id: The Youtube channel to be removed (str)
:return: True if removed successfully, False if the channel was not in the list. (bool)
"""
if youtube_channel_id in self.youtube_channels.keys():
# youtube channel exists in list, remove and return True
self.youtube_channels.pop(youtube_channel_id, None)
await self.save()
return True
else:
# youtube channel does not exist in list, return False
return False
async def add_purger(self, text_channel, max_age: int) -> bool:
"""
Adds a purger channel if not already there.
:param text_channel: The text channel that will be purged (Discord Channel)
:param max_age: The max age of messages in minutes (int)
:return: True if added successfully, False if already in list. (bool)
"""
if str(text_channel.id) not in self.purgers.keys():
# purger text channel not in list, add to list and return True
self.purgers[str(text_channel.id)] = max_age
await self.save()
return True
else:
# purger text channel already in list, return false
return False
async def remove_purger(self, text_channel) -> bool:
"""
Remove a purger channel
:param text_channel: The text channel with attached purger to be removed (Discord Channel)
:return: True if removed successfully, False if the purger was not in the list. (bool)
"""
if str(text_channel.id) in self.purgers.keys():
# purger text channel exists in list, remove and return True
self.purgers.pop(str(text_channel.id), None)
await self.save()
return True
else:
# purger text channel does not exist in list, return False
return False
async def update_notification_audit(self, list_name: str) -> bool:
"""
Updates the notified_on field for the notified list
:param list_name: The list that needs to be updated (str)
:return: True if updated successfully, False if something went wrong. (bool)
"""
if list_name not in self.notification_lists.keys():
return False
notification_list = self.notification_lists[list_name]
if "notified_on" not in notification_list.keys():
notification_list["notified_on"] = []
notification_list["notified_on"].append(datetime.now().isoformat())
await self.save()
return True
async def add_pet(self, pet_name: str, user_id: str, category: str) -> None:
pet_id = await self.get_new_pet_id()
pet_id_str = str(pet_id)
self.pets[pet_id_str] = {}
self.pets[pet_id_str]['owner'] = user_id
self.pets[pet_id_str]['pet_name'] = pet_name.lower()
self.pets[pet_id_str]['category'] = category.lower()
await self.save()
async def delete_pet(self, pet_id: str) -> None:
pets = self.pets
del pets[pet_id]
await self.save()
async def get_new_pet_id(self) -> int:
if self.pets_last_id is None:
self.pets_last_id = 0
self.pets_last_id += 1
await self.save()
return self.pets_last_id
async def add_new_pet_category(self, category_name: str) -> bool:
categories = self.pets_categories
if category_name in categories:
return False
categories.append(category_name)
await self.save()
return True
async def remove_pet_category(self, category_name: str) -> bool:
categories = self.pets_categories
category_name = category_name.lower()
if category_name not in categories:
return False
categories.remove(category_name)
await self.save()
return True
async def update_church_channel(self, church: str) -> bool:
"""
Updates the church_channel
:param church: the channel that's been set
:return: True if updated and saved, False if it's the same
"""
church = church.strip("<#")
church = int(church.strip(">"))
if church != self.church_channel:
self.church_channel = church
await self.save()
return True
else:
return False
async def set_church_event(self, sender: str, receiver: str, day: int, culture: str, message:Optional[str] = None):
"""
Adds a church_event
:param sender: The person who sent a challenge
:param receiver: The person who's being challenged
:param day: The day the challenge will be sent out
:param culture: The language being used in the bot
:param message: In case the sender wants to add a message to his challenge
"""
info = {}
info["sender"] = sender
info["receiver"] = receiver
info["day"] = day
info["culture"] = culture
info["message"] = message
self.church_event.append(info)
await self.save()
async def remove_church_event(self):
self.church_event.pop(0)
await self.save()
async def update_mod_channel(self, mod_channel: str) -> bool:
self.mod_channel = mod_channel
await self.save()
return True
async def update_youtube_channel_video_id(guild_id: int, youtube_channel_id, latest_video_id):
"""
Sets the video ID of a channel. This is needed so that a notification is only posted
when a new video is uploaded.
:param guild_id: The Guild ID of the youtube list (int)
:param youtube_channel_id: The Youtube channel to be notified for (str)
:param latest_video_id: ID of the latest video (str)
:return: True if updated successfully, False if the channel doesn't exist yet. (bool)
"""
print("update_youtube_channel_video_id")
guild_data = await get_guild_data(guild_id)
if youtube_channel_id in guild_data.youtube_channels.keys():
# youtube channel in list, update video ID and return True
guild_data.youtube_channels[youtube_channel_id][
"latest_video_id"
] = latest_video_id
# TODO: check if file is already being saved?
await guild_data.save()
return True
else:
# youtube channel not in list, return false
return False
async def get_all_guilds_data() -> [GuildData]:
"""
Retrieves the guild data for all guilds.
:returns: List of GuildData objects ([GuildData])
"""
guilds_data = []
for file in listdir(_configFolder):
split_file = path.splitext(file)
if split_file[1] == ".json":
guild_data = await get_guild_data(int(split_file[0]))
guilds_data.append(guild_data)
return guilds_data
async def get_guild_data(guild_id: int) -> GuildData:
"""
Retrieves the guild data for the given guild id.
If possible it will be fetched from the cache, otherwise it will be loaded from the json file
:param guild_id: Guild id (int)
:returns:GuildData object (GuildData)
"""
# check if memory cache contains server config
if guild_id in _guildConfigCache.keys():
return _guildConfigCache[guild_id]
# check if server config file exists
fileName = get_config_file_path(guild_id)
if path.exists(fileName):
# Load data
config = await __read_file(guild_id, fileName)
else:
# Init new instance of ServerData
config = GuildData(guild_id)
_guildConfigCache[guild_id] = config
return config
async def __read_file(guild_id: int, filename: str) -> GuildData:
"""
Read the given json file and parse it to a GuildData object
:param guild_id: Guild Id (int)
:param filename: The name of the file to open (str)
:returns: GuildData object (GuildData)
"""
# TODO: Actually make this async
with open(filename) as config:
data = json.load(config)
guildData = GuildData(guild_id)
guildData.bot_admins = data.get("bot_admins", [])
guildData.notification_lists = data.get("notification_lists", [])
guildData.culture = data.get("culture", "en")
guildData.youtube_channels = data.get("youtube_channels", {})
guildData.purgers = data.get("purgers", {})
guildData.pets = data.get("pets", {})
guildData.pets_last_id = data.get("pets_last_id", None)
guildData.pets_categories = data.get("pets_categories", [])
guildData.mod_channel = data.get("mod_channel",None)
guildData.church_channel = data.get("church_channel", "")
guildData.church_event = data.get("church_event", [])
guildData.member_notification_number = data.get("member_notification_number", DEFAULT_MEMBER_NOTIFICATION_NUMBER)
return guildData
def get_config_file_path(guild_id: int) -> str:
"""
Get the path for the save file for the given guild id
:param guild_id: Guild Id (int)
:return: filepath (str)
"""
return path.join(_configFolder, str(guild_id) + ".json")
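# Editor's note: illustrative usage sketch, not part of the original module. It assumes an async
# context and a real guild id from discord.py; the list name, emoji and ids below are placeholders.
# Kept commented out so importing this module is unaffected.
#
# import asyncio
#
# async def demo(guild_id: int):
#     guild_data = await get_guild_data(guild_id)   # cached after the first load
#     await guild_data.add_notification_list("movies", "🎬", False)
#     subscribed = await guild_data.sub_user("movies", 123456789012345678)
#     print(subscribed, get_config_file_path(guild_id))
#
# asyncio.run(demo(42))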
| 35.536885
| 121
| 0.626571
|
3ee020e6bd64234041c9e27374abbd30efcef382
| 7,677
|
py
|
Python
|
python/oneflow/test/modules/test_slice.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 3,285
|
2020-07-31T05:51:22.000Z
|
2022-03-31T15:20:16.000Z
|
python/oneflow/test/modules/test_slice.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 2,417
|
2020-07-31T06:28:58.000Z
|
2022-03-31T23:04:14.000Z
|
python/oneflow/test/modules/test_slice.py
|
grybd/oneflow
|
82237ad096a10527591660c09b61444c42917e69
|
[
"Apache-2.0"
] | 520
|
2020-07-31T05:52:42.000Z
|
2022-03-29T02:38:11.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_slice(test_case, device):
np_arr = np.random.randn(3, 6, 9).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
tup_list = [[None, None, None], [0, 5, 2], [0, 6, 3]]
y = flow.slice(x, slice_tup_list=tup_list)
tmp = np_arr[0:3, 0:5, 0:6]
np_out = tmp[::1, ::2, ::3]
test_case.assertTrue(np.array_equal(y.numpy(), np_out))
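# Editor's note: clarifying sketch, not part of the original test. As the assertions above imply,
# each entry of slice_tup_list is [start, stop, step] for one axis, with [None, None, None]
# meaning "take the whole axis", so the tuple list used here matches the NumPy expression:
#
# np_equivalent = np_arr[:, 0:5:2, 0:6:3]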
def _test_slice_empty(test_case, device):
np_arr = np.random.randn(10).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
y = x[3:3]
test_case.assertEqual(y.shape, flow.Size((0,)))
np_out = np_arr[3:3]
test_case.assertTrue(np.array_equal(y.numpy(), np_out))
def _test_slice_1_dim(test_case, device):
np_arr = np.random.randn(100).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
test_case.assertTrue(np.allclose(x[1].numpy(), np_arr[1], 1e-05, 1e-05))
test_case.assertTrue(np.allclose(x[99].numpy(), np_arr[99], 1e-05, 1e-05))
test_case.assertTrue(np.allclose(x[0:2].numpy(), np_arr[0:2], 1e-05, 1e-05))
def _test_slice_3_dim(test_case, device):
np_arr = np.random.randn(2, 3, 4).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
test_case.assertTrue(np.allclose(x[:, 0].numpy(), np_arr[:, 0], 1e-05, 1e-05))
def _test_slice_4_dim(test_case, device):
np_arr = np.random.randn(5, 3, 6, 9).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
tup_list = [[0, 5, 2], [None, None, None], [0, 5, 2], [0, 6, 3]]
y = flow.slice(x, slice_tup_list=tup_list)
tmp = np_arr[0:5, 0:3, 0:5, 0:6]
np_out = tmp[::2, ::1, ::2, ::3]
test_case.assertTrue(np.array_equal(y.numpy(), np_out))
def _test_slice_with_int_index(test_case, device):
np_arr = np.random.randn(2, 3, 4).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
of_out = x[0, 1:2]
np_out = np_arr[0, 1:2]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
np_arr = np.random.randn(2, 3, 4).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
of_out = x[0, :]
np_out = np_arr[0, :]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
np_arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
of_out = x[0, :, :]
np_out = np_arr[0, :, :]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
np_arr = np.random.randn(2, 3, 4, 5).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
of_out = x[0, :, :, :]
np_out = np_arr[0, :, :, :]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_slice_negative_index(test_case, device):
np_arr = np.random.randn(4, 5, 6)
x = flow.tensor(np_arr, dtype=flow.float32, device=flow.device(device))
test_case.assertTrue(np.allclose(x[-1].numpy(), np_arr[-1], 0.0001, 0.0001))
test_case.assertTrue(np.allclose(x[-2].numpy(), np_arr[-2], 0.0001, 0.0001))
test_case.assertTrue(np.allclose(x[-3].numpy(), np_arr[-3], 0.0001, 0.0001))
test_case.assertTrue(np.allclose(x[-4].numpy(), np_arr[-4], 0.0001, 0.0001))
def _test_slice_ellipsis_type(test_case, device):
np_arr = np.random.randn(2, 3, 4, 5, 6, 7).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device))
of_out = x[..., ::2, ::2, 3:4]
np_out = np_arr[..., ::2, ::2, 3:4]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
of_out = x[..., 1:2, ::2, 1, ::3]
np_out = np_arr[..., 1:2, ::2, 1, ::3]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
of_out = x[0, 2, ..., 1, 1:2]
np_out = np_arr[0, 2, ..., 1, 1:2]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
of_out = x[::2, ..., 1:2]
np_out = np_arr[::2, ..., 1:2]
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_slice_backward(test_case, device):
np_arr = np.random.randn(3, 6, 9).astype(np.float32)
x = flow.tensor(np_arr, device=flow.device(device), requires_grad=True)
tup_list = [[None, None, None], [0, 5, 2], [0, 6, 3]]
y = flow.slice(x, slice_tup_list=tup_list)
z = y.sum()
z.backward()
np_grad = np.zeros((3, 6, 9))
np_grad[0:3, 0:5, 0:6][::1, ::2, ::3] = 1
test_case.assertTrue(np.array_equal(x.grad.numpy(), np_grad))
@flow.unittest.skip_unless_1n1d()
class TestSlice(flow.unittest.TestCase):
def test_slice(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_slice,
_test_slice_empty,
_test_slice_1_dim,
_test_slice_3_dim,
_test_slice_4_dim,
_test_slice_with_int_index,
_test_slice_negative_index,
_test_slice_ellipsis_type,
_test_slice_backward,
]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@flow.unittest.skip_unless_1n1d()
class TestSliceUpdate(flow.unittest.TestCase):
def test_slice_update(test_case):
x = np.array([1, 1, 1, 1, 1]).astype(np.float32)
input = flow.tensor(x, requires_grad=True)
update = flow.tensor(np.array([2, 3, 4]).astype(np.float32), requires_grad=True)
output = np.array([1.0, 2.0, 3.0, 4.0, 1.0])
y = flow.slice_update(input, update, slice_tup_list=[[1, 4, 1]])
z = y.sum()
z.backward()
test_case.assertTrue(np.array_equal(y.numpy(), output))
np_grad = np.zeros(x.shape)
np_grad[0] = 1
np_grad[4] = 1
test_case.assertTrue(np.array_equal(input.grad.numpy(), np_grad))
test_case.assertTrue(np.array_equal(update.grad.numpy(), np.ones(update.shape)))
@flow.unittest.skip_unless_1n1d()
class TestLogicalSliceAssign(flow.unittest.TestCase):
def test_logical_slice_assign(test_case):
x = np.array([1, 1, 1, 1, 1]).astype(np.float32)
input = flow.tensor(x)
update = flow.tensor(np.array([2, 3, 4]).astype(np.float32))
output = np.array([1.0, 2.0, 3.0, 4.0, 1.0])
flow.logical_slice_assign(input, update, slice_tup_list=[[1, 4, 1]])
test_case.assertTrue(np.array_equal(input.numpy(), output))
def test_logical_slice_assign_negative_index(test_case):
np_arr = np.zeros(shape=(2, 3, 4))
input = flow.tensor(np_arr, dtype=flow.float32)
np_arr[-1] = 1
input[-1] = 1
test_case.assertTrue(np.array_equal(input.numpy(), np_arr))
def test_logical_slice_assign_ellipsis_type(test_case):
np_arr = np.zeros(shape=(2, 3, 4, 5, 6))
input = flow.tensor(np_arr, dtype=flow.float32)
np_arr[0, ::1, ..., 2:3] = 1
input[0, ::1, ..., 2:3] = 1
test_case.assertTrue(np.array_equal(input.numpy(), np_arr))
if __name__ == "__main__":
unittest.main()
| 39.168367
| 88
| 0.644913
|
0d5237a456e6cbd94af1c971ebc5e48280c836b2
| 2,038
|
py
|
Python
|
examples/python/linefinder.py
|
engagementlab/upm
|
9be920dbcd4ff9f333bce27e937a6e72e46899d6
|
[
"MIT"
] | 1
|
2018-11-08T03:59:01.000Z
|
2018-11-08T03:59:01.000Z
|
examples/python/linefinder.py
|
engagementlab/upm
|
9be920dbcd4ff9f333bce27e937a6e72e46899d6
|
[
"MIT"
] | null | null | null |
examples/python/linefinder.py
|
engagementlab/upm
|
9be920dbcd4ff9f333bce27e937a6e72e46899d6
|
[
"MIT"
] | 1
|
2018-11-08T03:59:04.000Z
|
2018-11-08T03:59:04.000Z
|
#!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_linefinder as upmlinefinder
def main():
# Instantiate a line finder sensor on digital pin D2
myLineFinder = upmlinefinder.LineFinder(2)
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from myLineFinder
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while(1):
if (myLineFinder.whiteDetected()):
print("White detected.")
else:
print("Black detected.")
time.sleep(1)
if __name__ == '__main__':
main()
| 37.054545
| 84
| 0.730128
|
ddbc78f3180dc89ceeb3ce9c9d73568d978545b0
| 1,053
|
py
|
Python
|
src/challenges/Arc2-Crypto/04/crypto4.py
|
cetfor/HTHCTF2021
|
fee32b71fca6d94badeea5a161021d5089f53fd7
|
[
"MIT"
] | 3
|
2021-12-11T16:34:20.000Z
|
2021-12-12T13:47:16.000Z
|
src/challenges/Arc2-Crypto/04/crypto4.py
|
cetfor/HTHCTF2021
|
fee32b71fca6d94badeea5a161021d5089f53fd7
|
[
"MIT"
] | null | null | null |
src/challenges/Arc2-Crypto/04/crypto4.py
|
cetfor/HTHCTF2021
|
fee32b71fca6d94badeea5a161021d5089f53fd7
|
[
"MIT"
] | 1
|
2021-12-14T13:20:36.000Z
|
2021-12-14T13:20:36.000Z
|
#!/usr/bin/env python3
import json
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
# Crypto facilities from pycryptodome: `pip3 install pycryptodome`
# https://www.pycryptodome.org/en/latest/src/installation.html
# Credit: http://aes.cryptohack.org/ecb_oracle/
KEY = b"ThisIsAK3yW0w!!1" # length: 16-bytes, 128-bits
FLAG = "HTH{f14g_leak}" # length: 14-bytes
def read_file(file_name):
f = open(file_name, "r")
flag = f.read()
f.close()
return flag
def encrypt(plaintext):
plaintext = bytes(plaintext, encoding='utf8')
padded = pad(plaintext + FLAG.encode(), 16)
cipher = AES.new(KEY, AES.MODE_ECB)
try:
encrypted = cipher.encrypt(padded)
except ValueError as e:
return {"error": str(e)}
return json.dumps({"ciphertext": encrypted.hex()})
def main():
plaintext = input("Nexxus Secure AES ECB Encryption Service. Plaintext: ")
plaintext = plaintext.rstrip("\n").rstrip("\r")
result = encrypt(plaintext)
print(result)
if __name__ == "__main__":
main()
| 27
| 78
| 0.678063
|
f02fa7046dbcedb6343876eee597825fbe3c1d1f
| 155
|
py
|
Python
|
reflect/example/example.py
|
TangJing/reflect
|
e72b07cfd0aec355dc86be75ccecfe49b8d7d02a
|
[
"MIT"
] | null | null | null |
reflect/example/example.py
|
TangJing/reflect
|
e72b07cfd0aec355dc86be75ccecfe49b8d7d02a
|
[
"MIT"
] | null | null | null |
reflect/example/example.py
|
TangJing/reflect
|
e72b07cfd0aec355dc86be75ccecfe49b8d7d02a
|
[
"MIT"
] | null | null | null |
import io
from lib.reflect import reflect
import exampleClass
print(reflect("exampleClass").Instance("testClass").Call("testCall","test reflect example"))
| 31
| 92
| 0.8
|
3b3b45db44f29af9867666c93edb8e038d58b626
| 761
|
py
|
Python
|
src/dump_ast.py
|
sztomi/code-generator
|
f9e1b108664a21728f1dc5b504f8966ea40ee9e0
|
[
"MIT"
] | 96
|
2015-01-14T02:01:10.000Z
|
2021-08-18T10:20:55.000Z
|
src/dump_ast.py
|
sztomi/code-generator
|
f9e1b108664a21728f1dc5b504f8966ea40ee9e0
|
[
"MIT"
] | 3
|
2015-01-15T02:33:40.000Z
|
2015-08-30T20:43:09.000Z
|
src/dump_ast.py
|
sztomi/code-generator
|
f9e1b108664a21728f1dc5b504f8966ea40ee9e0
|
[
"MIT"
] | 29
|
2015-01-22T14:31:32.000Z
|
2020-12-01T21:20:42.000Z
|
#!/usr/bin/python
# vim: set fileencoding=utf-8
import clang.cindex
import asciitree # must be version 0.2
import sys
def node_children(node):
return (c for c in node.get_children() if c.location.file.name == sys.argv[1])
def print_node(node):
text = node.spelling or node.displayname
kind = str(node.kind)[str(node.kind).index('.')+1:]
return '{} {}'.format(kind, text)
if len(sys.argv) != 2:
print("Usage: dump_ast.py [header file name]")
sys.exit()
clang.cindex.Config.set_library_file('/usr/local/lib/libclang.so')
index = clang.cindex.Index.create()
translation_unit = index.parse(sys.argv[1], ['-x', 'c++', '-std=c++11', '-D__CODE_GENERATOR__'])
print(asciitree.draw_tree(translation_unit.cursor, node_children, print_node))
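# Editor's note: illustrative only, not part of the original script. For a small header such as
#   class Foo { public: int bar(int x); };
# the printed tree looks roughly like the sketch below (exact cursor kinds depend on the libclang
# version in use):
#
#   TRANSLATION_UNIT foo.hpp
#     +--CLASS_DECL Foo
#        +--CXX_ACCESS_SPEC_DECL
#        +--CXX_METHOD bar
#           +--PARM_DECL x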
| 30.44
| 96
| 0.69908
|
80418e7e587699821880d9322319cddc7fad5ac7
| 1,563
|
py
|
Python
|
cca-operator/cca-operator.py
|
thejuliekramer/ckan-cloud-docker
|
6b1549824a732393df41bcb4c87f3ccf0430a854
|
[
"MIT"
] | 13
|
2018-11-15T10:58:16.000Z
|
2019-11-03T14:05:00.000Z
|
cca-operator/cca-operator.py
|
thejuliekramer/ckan-cloud-docker
|
6b1549824a732393df41bcb4c87f3ccf0430a854
|
[
"MIT"
] | 46
|
2018-11-08T07:13:17.000Z
|
2020-01-17T13:17:15.000Z
|
cca-operator/cca-operator.py
|
hasadna/ckan-cloud-docker
|
04c8d81751f6f241f357c021e8566403e236d143
|
[
"MIT"
] | 19
|
2018-11-28T14:32:52.000Z
|
2019-11-29T06:21:35.000Z
|
#!/usr/bin/env python3
import os, sys, yaml, datetime
CCA_OPERATOR_ROLE = os.environ['CCA_OPERATOR_ROLE']
ADMIN_ROLES = ['', 'admin']
CONTINUOUS_DEPLOYMENT_ROLES = ADMIN_ROLES + ['continuous-deployment']
def print_stderr(*args):
print(*args, file=sys.stderr)
if sys.argv[1].startswith('patch-deployment ') and CCA_OPERATOR_ROLE in CONTINUOUS_DEPLOYMENT_ROLES:
_, namespace, deployment, container, values_file, backup_dir, image_attrib, image = sys.argv[1].split(' ')
with open(values_file) as f:
values = yaml.safe_load(f)
os.system(f'mkdir -p {backup_dir}')
backup_file = 'values_' + datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%s') + '.yaml'
backup_file = os.path.join(backup_dir, backup_file)
print_stderr(f'modifying values file {values_file}, saving backup to {backup_file}')
with open(backup_file, 'w') as f:
yaml.dump(values, f)
values[image_attrib] = image
with open(values_file, 'w') as f:
yaml.dump(values, f)
if deployment != '' and container != '':
patch_params = f'deployment/{deployment} {container}={image}'
print_stderr(f'patching {patch_params}')
patch_cmd = f'kubectl set image -n {namespace} {patch_params}'
if os.system(f'{patch_cmd} --dry-run') != 0:
print_stderr('dry-run failed')
exit(1)
if os.system(f'{patch_cmd}') != 0:
print_stderr('failed to patch deployment')
exit(1)
print_stderr('Great Success!')
exit(0)
else:
print_stderr('Unexpected Error')
exit(1)
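# Editor's note: illustrative, not part of the original script. The patch-deployment command above
# is passed as one space-separated argument; a hypothetical invocation (every name below is a
# placeholder) would look like:
#
#   CCA_OPERATOR_ROLE=continuous-deployment ./cca-operator.py \
#       "patch-deployment my-namespace my-deploy my-container ./values.yaml ./backups ckanImage registry.example.com/ckan:2.9"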
| 35.522727
| 110
| 0.654511
|
086461aa17eed33703812a137788ca8f0e0e5fc8
| 4,916
|
py
|
Python
|
portfolio/settings.py
|
lizschley/number_six
|
a427202397822fca1f49d43d138c24fffdbe95da
|
[
"MIT"
] | 1
|
2020-07-14T20:13:05.000Z
|
2020-07-14T20:13:05.000Z
|
portfolio/settings.py
|
lizschley/number_six
|
a427202397822fca1f49d43d138c24fffdbe95da
|
[
"MIT"
] | 3
|
2021-04-06T20:40:08.000Z
|
2021-06-03T21:54:21.000Z
|
portfolio/settings.py
|
lizschley/number_six
|
a427202397822fca1f49d43d138c24fffdbe95da
|
[
"MIT"
] | null | null | null |
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
# pylint: disable=missing-function-docstring
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('HASH_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
if config('ENVIRONMENT') == 'development':
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
else:
ALLOWED_HOSTS = ['lizschley.com', 'ec2-54-226-45-173.compute-1.amazonaws.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
# other apps
'django_extensions',
'crispy_forms',
'storages',
# portfolio apps
'projects',
'home',
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "originals", "scss"),
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
# STATIC_ROOT = os.path.join(BASE_DIR, "static")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
# Following is for S3 implementation
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
# STATICFILES_STORAGE lets collectstatic put static files in S3 automatically.
# Only use it for the initial load and when Django is updated (to pick up its css and js changes);
# the plan is to no longer run collectstatic for my own static files.
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3StaticStorage'
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'lizschley-static'
AWS_DEFAULT_ACL = None
AWS_S3_CUSTOM_DOMAIN = 'dirl4bhsg8ywj.cloudfront.net'
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
AWS_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{AWS_LOCATION}/'
# Trouble-shooting
AWS_QUERYSTRING_AUTH = False
AWS_S3_REGION_NAME = 'us-east-1'
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
| 29.261905
| 95
| 0.714809
|
4e45154ec136a41c576a367c642b6765c9b89d63
| 7,027
|
py
|
Python
|
test/pytest/service-desktop/test_outbox.py
|
buk7456/MuditaOS
|
06ef1e131b27b0f397cc615c96d51bede7050423
|
[
"BSL-1.0"
] | 369
|
2021-11-10T09:20:29.000Z
|
2022-03-30T06:36:58.000Z
|
test/pytest/service-desktop/test_outbox.py
|
buk7456/MuditaOS
|
06ef1e131b27b0f397cc615c96d51bede7050423
|
[
"BSL-1.0"
] | 149
|
2021-11-10T08:38:35.000Z
|
2022-03-31T23:01:52.000Z
|
test/pytest/service-desktop/test_outbox.py
|
buk7456/MuditaOS
|
06ef1e131b27b0f397cc615c96d51bede7050423
|
[
"BSL-1.0"
] | 41
|
2021-11-10T08:30:37.000Z
|
2022-03-29T08:12:46.000Z
|
# Copyright (c) 2017-2022, Mudita Sp. z.o.o. All rights reserved.
# For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md
import time
import pytest
from harness.api.contacts import AddContact, DeleteContactById
from harness.api.messages import AddMessage, DeleteMessageById
from harness.api.outbox import NotificationEntry, NotificationType, NotificationChange, GetNotifications, \
DeleteNotifications
from harness.request import TransactionError
class OutboxTester:
def __init__(self, harness):
self.harness = harness
def get_notifications(self):
try:
notifications = GetNotifications().run(self.harness).entries
except TransactionError:
return False
else:
return True, notifications
def delete_notifications(self, entries: list):
try:
DeleteNotifications(entries).run(self.harness)
except TransactionError:
return False
else:
return True
def add_message(self, message_number, message_body):
try:
message = AddMessage(message_number, message_body).run(self.harness).message
except TransactionError:
return False
else:
return True, message
def delete_message_by_id(self, message_record_id):
try:
DeleteMessageById(message_record_id).run(self.harness)
except TransactionError:
return False
else:
return True
def add_contact(self, contact_record):
try:
contact_id = AddContact(contact_record).run(self.harness).id
except TransactionError:
return False
else:
return True, contact_id
def delete_contact_by_id(self, contact_record_id):
try:
DeleteContactById(contact_record_id).run(self.harness)
except TransactionError:
return False
else:
return True
@pytest.mark.service_desktop_test
@pytest.mark.usefixtures("phone_unlocked")
def test_getting_notifications(harness):
outbox_tester = OutboxTester(harness)
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
assert not received_notifications, "Notification list is not empty at the beginning of the test!"
# Add message to generate "message created" and "thread created" notifications
message_number = "123456789"
message_body = "Hello, how are you?"
result, message_record = outbox_tester.add_message(message_number, message_body)
assert result, "Failed to add message!"
# Add contact to generate "contact created" notification
contact_record = {
"address": "6 Czeczota St.\n02600 Warsaw",
"altName": "Smith",
"email": "john.smith@mudita.com",
"blocked": False,
"favourite": False,
"ice": False,
"numbers": [
"123456789"
],
"speedDial": "1",
"priName": "John",
"note": "Some note"
}
result, contact_id = outbox_tester.add_contact(contact_record)
assert result, "Failed to add contact!"
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
# Check if all notifications are present
found_message_notification = False
for notification in received_notifications:
if notification.type == NotificationType.MESSAGE and notification.change == NotificationChange.CREATED \
and notification.record_id == message_record["messageID"]:
found_message_notification = True
break
found_thread_notification = False
for notification in received_notifications:
if notification.type == NotificationType.THREAD and notification.change == NotificationChange.CREATED:
found_thread_notification = True
break
found_contacts_notification = False
for notification in received_notifications:
if notification.type == NotificationType.CONTACT and notification.change == NotificationChange.CREATED \
and notification.record_id == contact_id:
found_contacts_notification = True
break
assert found_message_notification and found_thread_notification and found_contacts_notification
# Wait for 20 seconds to be sure that notifications are cleaned by timer
time.sleep(20)
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
assert not received_notifications, "Notification list is not empty after timeout!"
assert outbox_tester.delete_message_by_id(message_record["messageID"]), "Failed to delete a message!"
assert outbox_tester.delete_contact_by_id(contact_id), "Failed to delete a contact!"
@pytest.mark.service_desktop_test
@pytest.mark.usefixtures("phone_unlocked")
def test_deleting_notifications(harness):
outbox_tester = OutboxTester(harness)
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
assert not received_notifications, "Notification list is not empty at the beginning of the test!"
# Add message to generate "message created" and "thread created" notifications
message_number = "123456789"
message_body = "Hello, how are you?"
result, message_record = outbox_tester.add_message(message_number, message_body)
assert result, "Failed to add message!"
# Add contact to generate "contact created" notification
contact_record = {
"address": "6 Czeczota St.\n02600 Warsaw",
"altName": "Smith",
"email": "john.smith@mudita.com",
"blocked": False,
"favourite": False,
"ice": False,
"numbers": [
"123456789"
],
"speedDial": "1",
"priName": "John",
"note": "Some note"
}
result, contact_id = outbox_tester.add_contact(contact_record)
assert result, "Failed to add contact!"
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
uids_of_notifications_to_be_deleted = []
for notification in received_notifications:
uids_of_notifications_to_be_deleted.append(notification.uid)
result = outbox_tester.delete_notifications(uids_of_notifications_to_be_deleted)
assert result, "Failed to get notifications!"
# Check if all generated notifications are deleted
result, received_notifications = outbox_tester.get_notifications()
assert result, "Failed to get notifications!"
for notification in received_notifications:
assert notification.uid not in uids_of_notifications_to_be_deleted, "Notification not deleted!"
assert outbox_tester.delete_message_by_id(message_record["messageID"]), "Failed to delete a message!"
assert outbox_tester.delete_contact_by_id(contact_id), "Failed to delete a contact!"
| 37.77957
| 112
| 0.700868
|
c7bde57b0ebc5a8c49a5b0b2a9fc8e174fc70aa8
| 2,103
|
py
|
Python
|
niscv_v2/analysis/real/garch_estimate.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
niscv_v2/analysis/real/garch_estimate.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
niscv_v2/analysis/real/garch_estimate.py
|
IanFla/Importance-Sampling
|
f2dd2164e95377d2cf025fcddd19b2592394e4d7
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
import pickle
def read(num):
data = []
for n in np.arange(1, num + 1):
file = open('../../data/real/garch_estimate_{}'.format(n), 'rb')
data.append(pickle.load(file))
return np.vstack(data)
def main():
plt.style.use('ggplot')
file = open('../../data/real/truth', 'rb')
truth = np.array(pickle.load(file)).reshape([1, 6, 1])
file = open('../../data/real/time', 'rb')
time = pickle.load(file)
data = read(30)
mean = np.mean(data, axis=0)
# print(mean)
# estimators = ['NIS', 'DNIS', 'DNIS$^*$', 'REG', 'MLE']
estimators = ['NIS', 'DNIS---', 'DIS', 'REG', 'MLE']
colors = ['y', 'y', 'g', 'r', 'b']
scenarios = ['(1, 0.05)', '(1, 0.01)', '(2, 0.05)', '(2, 0.01)', '(5, 0.05)', '(5, 0.01)']
nMSE = 400000 * np.mean((data - truth) ** 2, axis=0)
print(np.round(nMSE, 4))
nMSE_time = nMSE * time.T
print(np.round(nMSE_time, 4))
nVar = 400000 * np.var(data, axis=0)
nMSE_time = nMSE_time[:, 1:] / nMSE_time[:, 0].reshape([-1, 1])
nVar = nVar[:, 1:] / nMSE[:, 0].reshape([-1, 1])
nMSE = nMSE[:, 1:] / nMSE[:, 0].reshape([-1, 1])
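# Editor's note: clarifying sketch, not in the original. The three lines above rescale every
# estimator by column 0, which the axis labels suggest is the IIS baseline, i.e. for estimator j
# and scenario s:
#     nMSE[s, j]      <- nMSE[s, j] / nMSE[s, IIS]
#     nMSE_time[s, j] <- (nMSE * Time)[s, j] / (nMSE * Time)[s, IIS]
# (nVar is likewise divided by the baseline nMSE), so values below 1 mean the estimator beats
# the baseline in that scenario.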
fig, ax = plt.subplots(1, 2, figsize=[10, 3])
for i, est in enumerate(estimators):
if est != 'DNIS---':
ax[0].semilogy(scenarios, nMSE[:, i], c=colors[i], label=est)
ax[0].semilogy(scenarios, nVar[:, i], '.', c=colors[i])
ax[1].semilogy(scenarios, nMSE_time[:, i], c=colors[i], label=est)
ax[0].set_xlabel(r'$(D,\alpha)$')
ax[1].set_xlabel(r'$(D,\alpha)$')
ax[0].set_ylabel(r'$\mathrm{MSE}_\mathrm{IIS}$ or $\mathrm{Var}_\mathrm{IIS}$')
ax[1].set_ylabel(r'$(\mathrm{MSE}\times\mathrm{Time})_\mathrm{IIS}$')
# ax[0].set_ylabel('statistical performance')
# ax[1].set_ylabel('overall performance')
for a in ax:
a.legend(loc=2)
a.grid(axis='x', which='major')
a.grid(axis='both', which='both')
fig.tight_layout()
fig.show()
# print(nMSE[:, 1:] / nMSE[:, :-1])
if __name__ == '__main__':
main()
| 33.380952
| 94
| 0.541607
|
f2649f7455dfedfbe1fbad7b3cd74e58ccb23087
| 5,032
|
py
|
Python
|
cpi/metrics_calculator.py
|
ThinkOpenly/cpi-breakdown
|
f5a949aba068062c6c622c50feb193d8404ed08d
|
[
"Apache-2.0"
] | null | null | null |
cpi/metrics_calculator.py
|
ThinkOpenly/cpi-breakdown
|
f5a949aba068062c6c622c50feb193d8404ed08d
|
[
"Apache-2.0"
] | null | null | null |
cpi/metrics_calculator.py
|
ThinkOpenly/cpi-breakdown
|
f5a949aba068062c6c622c50feb193d8404ed08d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017,2019 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* Rafael Peria Sene <rpsene@br.ibm.com>
* Matheus Castanho <mscastanho@ibm.com>
"""
import os
import re
import sys
from collections import defaultdict
from math import fabs
import yaml
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
class MetricsCalculator():
'''
Class that calculates metrics
'''
metrics_groups = []
def __init__(self, processor):
metrics_file = DIR_PATH + "/metrics/" + str.lower(processor) + ".yaml"
self.metrics_groups = self.__read_metrics(metrics_file)
@staticmethod
def __read_metrics(metrics_file):
""" Get the metrics based on the processor version. They are located
at /metrics/<processor_model>.yaml. It returns a dictionary which
contains the NAME and the FORMULA """
try:
with open(metrics_file, "r") as metrics:
return yaml.load(metrics, Loader=yaml.FullLoader)
except IOError:
sys.stderr.write("Could not find file '{}'. Check if your "
"installation is correct or try to install "
"cpi again.\n".format(metrics_file))
sys.exit(1)
def get_raw_metrics(self):
'''Return the raw metrics collect from its file'''
return self.metrics_groups
def calculate_metrics(self, parsed_output_dict):
'''
Calculate the metrics based on the processor model and returns a list
of list which contains:
[
[METRIC_NAME_1, METRIC_RESULT_1, METRIC_PERCENT_1],
[METRIC_NAME_2, METRIC_RESULT_2, METRIC_PERCENT_2],
...
]
It receives a dictionary with the parsed output of the execution.
This dict content is <EVENT> : <VALUE> like:
PM_CMPLU_STALL_THRD : 55322
PM_CMPLU_STALL_BRU_CRU : 25701
PM_CMPLU_STALL_COQ_FULL : 178
PM_CMPLU_STALL_BRU : 16138
'''
parsed_output = defaultdict(list)
parsed_output = parsed_output_dict
metrics_results = []
if int(parsed_output.get('PM_RUN_INST_CMPL')[0]) > 0:
for group in self.metrics_groups.values():
result_tmp = []
# Split the metric into its components to allow replacing
# the events with the calculated values.
# For example, the metric:
# PM_CMPLU_STALL_DMISS_L3MISS - (PM_CMPLU_STALL_DMISS_LMEM + \
# PM_CMPLU_STALL_DMISS_L21_L31 + PM_CMPLU_STALL_DMISS_REMOTE)
# Becomes:
# [PM_CMPLU_STALL_DMISS_L3MISS, -, (, PM_CMPLU_STALL_DMISS_LMEM,\
# +, PM_CMPLU_STALL_DMISS_L21_L31, +, \
# PM_CMPLU_STALL_DMISS_REMOTE, )]
calc_function = re.split("([+-/*/(/)//])",
group['FORMULA'].replace(" ", ""))
for parameter in calc_function:
# If we find the event in the parsed output, it is
# replaced by its value.
if parameter in parsed_output:
prm = 'float(' + parsed_output.get(parameter) + ')'
calc_function[calc_function.index(parameter)] = prm
# Once the events are replaced by their values in the metric,
# we put it all together again and calculate the metric
metric = ''.join(calc_function)
metric_result = eval(metric)
result_tmp.append(group["NAME"])
if metric_result > 0:
result_tmp.append("%.3f" % metric_result)
cmd = ('(float(metric_result)/(float(parsed_output.get'
'(\'PM_RUN_CYC\'))/float(parsed_output.get'
'(\'PM_RUN_INST_CMPL\'))))*100')
result_tmp.append("%.2f" % eval(cmd))
else:
result_tmp.append(0)
result_tmp.append(fabs(0))
metrics_results.append(result_tmp)
return metrics_results
else:
sys.stderr.write("PM_RUN_INST_CMPL is 0.")
sys.stderr.write("As it is the base divisor for all metrics \
calculation it can not be 0. \
Please run CPI again.")
sys.exit(1)
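# Editor's note: illustrative, not part of the original module. What the re.split() call above
# yields for a small made-up formula, so the substitution step is easier to follow:
#
#   >>> import re
#   >>> re.split("([+-/*/(/)//])", "PM_A-(PM_B+PM_C)".replace(" ", ""))
#   ['PM_A', '-', '', '(', 'PM_B', '+', 'PM_C', ')', '']
#
# Event names found in parsed_output are then swapped for 'float(<value>)', the pieces are joined
# back together, and the resulting expression is eval()'d.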
| 41.245902
| 81
| 0.583267
|
e2a027e396493a8bf70ecfebe5267d508572fab6
| 8,284
|
py
|
Python
|
variational_principle/compute/compute.py
|
Tiernan8r/Variational-Principle
|
e5f82956c3bfd57f7d73c7220ac2cdd9c8f09935
|
[
"MIT"
] | 1
|
2020-05-21T08:10:24.000Z
|
2020-05-21T08:10:24.000Z
|
variational_principle/compute/compute.py
|
Tiernan8r/Variational_Principle
|
e5f82956c3bfd57f7d73c7220ac2cdd9c8f09935
|
[
"MIT"
] | 33
|
2020-01-21T12:02:06.000Z
|
2020-06-23T16:50:57.000Z
|
variational_principle/compute/compute.py
|
Tiernan8r/Variational_Principle
|
e5f82956c3bfd57f7d73c7220ac2cdd9c8f09935
|
[
"MIT"
] | 1
|
2022-03-17T01:50:25.000Z
|
2022-03-17T01:50:25.000Z
|
import random
import time
import numpy as np
from scipy.linalg import null_space
from variational_principle.differentiation.laplacian import generate_laplacian
import variational_principle.potentials.potential as pot
from variational_principle import ENERGY_FACTOR
# The Laplacian (2nd-derivative) matrix
global DEV2
def normalise(psi: np.ndarray, dr: float) -> np.ndarray:
"""
The function takes in a non-normalised psi wavefunction, and returns the normalised version of it.
:param psi: The wavefunction to normalise.
:param dr: The grid spacing of the wavefunction.
:return: The normalised wavefunction
"""
# integrate using the rectangular rule
norm = (psi * psi).sum() * dr
# Since psi is displayed as |psi|^2, take the sqrt of the norm
norm_psi = psi / np.sqrt(norm)
return norm_psi
def energy(psi: np.ndarray, V: np.ndarray, dr: float) -> float:
"""
Calculates the energy eigenvalue of a given wavefunction psi in a given potential system V.
:param psi: The wavefunction in the system.
:param V: The potential function of the system.
:param dr: The grid spacing in the system.
:return: The energy eigenvalue E.
"""
# When V is inf we will get an invalid-value warning at runtime; not an issue, it is handled by the filtering below:
Vp = V * psi
# filter out nan values in Vp
Vp = np.where(np.isfinite(Vp), Vp, 0)
# Calculate the kinetic energy of the system
# DEV2 is the Laplacian 2nd-derivative matrix.
Tp = ENERGY_FACTOR * (DEV2 @ psi)
# Return the integral of the KE and PE applied to psi, which is the energy.
return (psi * (Tp + Vp)).sum() * dr
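# Editor's note: clarifying sketch, not in the original. In this discretisation the value returned
# above is the rectangular-rule approximation of the energy expectation
#     E[psi] = sum_i psi_i * (ENERGY_FACTOR * (DEV2 @ psi)_i + V_i * psi_i) * dr  ~=  <psi| T + V |psi>
# with ENERGY_FACTOR playing the role of -hbar^2 / (2m) and DEV2 the discrete Laplacian.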
def nth_state(r: np.ndarray, dr: float, D: int, N: int, num_iterations: int,
prev_psi_linear: np.ndarray, n: int) -> np.ndarray:
"""
Calculates the nth psi energy eigenstate wavefunction of a given potential system.
:param r: The grid coordinates.
:param dr: The grid spacing.
:param D: The number of axes in the system.
:param N: The size of each axis.
:param num_iterations: The number of iterations to calculate over.
:param prev_psi_linear: The previous calculated psi states for the potential system.
:param n: The order of the state.
:return: The energy eigenstate wavefunction psi of order n for the potential system.
"""
# Get the time that calculations start at.
t1 = time.time()
# Get the orthonormal basis for this state by finding the null space of the previous lower-order psi states.
orthonormal_basis = null_space(prev_psi_linear).T
# Set a seed for repeatable results.
random.seed("THE-VARIATIONAL-PRINCIPLE")
# Calculate the potential of the system.
V = pot.potential(r)
# turn the potential grid into a linear column vector for linear algebra purposes.
V = V.reshape(N ** D)
# generate an initial psi, I've found that a quadratic function works nicely (no discontinuities.)
psi = (0.5 * r ** 2).sum(axis=0)
# psi = np.ones(r.shape).sum(axis=0)
# linearise psi from a grid to a column vector
psi = psi.reshape(N ** D)
# Account for infinite values in the potential:
len_V = len(V)
# Keep track of all the indices that have an inf value for the V.
nan_indices = [False] * len_V
for j in range(len_V):
# # Tag the bordering points as well.
# a, b = j - 1, j + 1
# if a < 0:
# a = 0
# if b >= len_V:
# b = len_V - 1
#
# if not np.isfinite(V[j]) and (not np.isfinite(V[a]) and not np.isfinite(V[b])):
# # nan_indices[a] = nan_indices[j] = nan_indices[b] = True
# nan_indices[j] = True
if not np.isfinite(V[j]):
nan_indices[j] = True
# filter the corresponding psi values to be = 0
psi = np.where(nan_indices, 0, psi)
# filter the values in the orthonormal basis to be 0
for j in range(n - 1):
nan_indices[j] = False
orthonormal_basis = np.where(nan_indices, 0, orthonormal_basis)
# get a default initial energy to compare against.
prev_E = energy(psi, V, dr)
# Keep track of the number of orthonormal bases that there are.
num_bases = len(orthonormal_basis)
# loop for the desired number of iterations
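    # (this is a simple stochastic variational search: psi is perturbed along one
    # randomly chosen orthonormal basis vector at a time, re-normalised, and the
    # change is kept only if it lowers the energy expectation value)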
for i in range(num_iterations):
# generate a random orthonormal basis to sample.
rand_index = random.randrange(num_bases)
# generate a random value to change by that converges to 0 as we sample more.
rand_change = random.random() * 0.1 * (num_iterations - i) / num_iterations
# 50% of the time, add, the other 50% take away
if random.random() > 0.5:
rand_change *= -1
# get the orthonormal basis that we are sampling with
basis_vector = orthonormal_basis[rand_index]
# tweak the psi wavefunction by the generated change, with the given basis.
psi += basis_vector * rand_change
# re normalise the changed psi
psi = normalise(psi, dr)
# get the corresponding new energy for the changed psi
new_E = energy(psi, V, dr)
# if the new energy is lower than the current energy, keep the change.
if new_E < prev_E:
prev_E = new_E
# otherwise set psi back to the way it was before the change.
else:
psi -= basis_vector * rand_change
psi = normalise(psi, dr)
# Display the final energy of the wavefunction to the console.
print("Final Energy:", energy(psi, V, dr))
# calculate how long the computation took.
t2 = time.time()
print("The time for the " + str(n) + "th iteration is:", t2 - t1, "s.\n")
# turn psi back from a column vector to a grid.
psi = psi.reshape([N] * D)
# Correction of phase, to bring it to the positive for nicer plotting.
phase = np.sum(psi) * dr
if phase < 0:
psi *= -1
# return the generated psi as a grid.
return psi
def compute(start=-10, stop=10, N=100, D=1, num_states=1, num_iterations=10 ** 5):
"""
The method to set up the variables and system, and aggregate the computed wavefunctions.
:param start: The lower bound of the grid.
:param stop: The upper bound of the grid.
:param N: The number of samples along an axis.
:param D: The number of dimensions.
:param num_states: The number of wavefunctions to compute.
:param num_iterations: The number of iterations per computation.
:return: r, V, all_psi: the grid, potential function and the list of all the wavefunctions.
"""
# Keep the number of states in bounds, so that the orthonormal basis generator doesn't return an error.
if num_states >= N:
num_states = N - 2
# The coordinates along the x axis
x = np.linspace(start, stop, N)
# The axes along each dimension
axes = [x] * D
# populate the grid using the axes.
r = np.array(np.meshgrid(*axes, indexing="ij"))
# generate the potential for the system
V = pot.potential(r)
# Calculate the grid spacing for the symmetric grid.
dr = (stop - start) / N
# Generate the 2nd order finite difference derivative matrix.
global DEV2
DEV2 = generate_laplacian(D, N, dr)
# Keep track whether we are on the first iteration or not.
first_iteration = True
# Set up two arrays to store the generated psi:
# Stores the psi as linear column vectors, used for calculating the next psi in the series.
all_psi_linear = np.zeros((1, N ** D))
# stores in their proper shape as grids, used for plotting.
all_psi = np.zeros((1, N * D))
# iterate over the number of states we want to generate psi for.
for i in range(num_states):
# Generate the psi for this order number
psi = nth_state(r, dr, D, N, num_iterations, all_psi_linear, i + 1)
# Store the generated psi in both ways in their corresponding arrays.
psi_linear = psi.reshape(N ** D)
if first_iteration:
all_psi_linear = np.array([psi_linear])
all_psi = np.array([psi])
first_iteration = False
else:
all_psi_linear = np.vstack((all_psi_linear, [psi_linear]))
all_psi = np.vstack((all_psi, [psi]))
return r, V, all_psi
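# Minimal usage sketch (not part of the original module); the grid bounds, sizes
# and iteration count below are illustrative assumptions, not project defaults.
if __name__ == "__main__":
    grid, potential, wavefunctions = compute(start=-5, stop=5, N=64, D=1,
                                             num_states=2, num_iterations=10 ** 4)
    print("Grid shape:", grid.shape)
    print("Number of states computed:", len(wavefunctions))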
| 37.484163
| 107
| 0.655118
|
d3462810b5604ab14f042abf6673f3238946632c
| 13,371
|
py
|
Python
|
corporate/views/upgrade.py
|
fatihCinarKrtg/zulip
|
9c96e030dddae71bd7d79bc3990373bfddc76d0e
|
[
"Apache-2.0"
] | 1
|
2021-06-25T18:10:20.000Z
|
2021-06-25T18:10:20.000Z
|
corporate/views/upgrade.py
|
fatihCinarKrtg/zulip
|
9c96e030dddae71bd7d79bc3990373bfddc76d0e
|
[
"Apache-2.0"
] | 10
|
2021-11-15T17:53:29.000Z
|
2022-02-27T13:51:47.000Z
|
corporate/views/upgrade.py
|
fatihCinarKrtg/zulip
|
9c96e030dddae71bd7d79bc3990373bfddc76d0e
|
[
"Apache-2.0"
] | 1
|
2020-12-29T17:22:00.000Z
|
2020-12-29T17:22:00.000Z
|
import logging
from decimal import Decimal
from typing import Any, Dict, Optional
import stripe
from django import forms
from django.conf import settings
from django.core import signing
from django.db import transaction
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from corporate.lib.stripe import (
DEFAULT_INVOICE_DAYS_UNTIL_DUE,
MIN_INVOICED_LICENSES,
BillingError,
compute_plan_parameters,
ensure_realm_does_not_have_active_plan,
get_latest_seat_count,
is_free_trial_offer_enabled,
is_sponsored_realm,
process_initial_upgrade,
sign_string,
unsign_string,
update_or_create_stripe_customer,
update_sponsorship_status,
validate_licenses,
)
from corporate.lib.support import get_support_url
from corporate.models import (
CustomerPlan,
PaymentIntent,
Session,
ZulipSponsorshipRequest,
get_current_plan_by_customer,
get_customer_by_realm,
)
from corporate.views.billing_page import billing_home
from zerver.decorator import require_organization_member, zulip_login_required
from zerver.lib.actions import do_make_user_billing_admin
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.send_email import FromAddress, send_email
from zerver.lib.validator import check_bool, check_int, check_string_in
from zerver.models import Realm, UserProfile, get_org_type_display_name
billing_logger = logging.getLogger("corporate.stripe")
VALID_BILLING_MODALITY_VALUES = ["send_invoice", "charge_automatically"]
VALID_BILLING_SCHEDULE_VALUES = ["annual", "monthly"]
VALID_LICENSE_MANAGEMENT_VALUES = ["automatic", "manual"]
def unsign_seat_count(signed_seat_count: str, salt: str) -> int:
try:
return int(unsign_string(signed_seat_count, salt))
except signing.BadSignature:
raise BillingError("tampered seat count")
def check_upgrade_parameters(
billing_modality: str,
schedule: str,
license_management: Optional[str],
licenses: Optional[int],
seat_count: int,
) -> None:
if billing_modality not in VALID_BILLING_MODALITY_VALUES: # nocoverage
raise BillingError("unknown billing_modality", "")
if schedule not in VALID_BILLING_SCHEDULE_VALUES: # nocoverage
raise BillingError("unknown schedule")
if license_management not in VALID_LICENSE_MANAGEMENT_VALUES: # nocoverage
raise BillingError("unknown license_management")
validate_licenses(billing_modality == "charge_automatically", licenses, seat_count)
def setup_upgrade_checkout_session_and_payment_intent(
user: UserProfile,
seat_count: int,
licenses: int,
license_management: str,
billing_schedule: int,
billing_modality: str,
onboarding: bool,
) -> stripe.checkout.Session:
customer = update_or_create_stripe_customer(user)
assert customer is not None # for mypy
free_trial = is_free_trial_offer_enabled()
_, _, _, price_per_license = compute_plan_parameters(
CustomerPlan.STANDARD,
license_management == "automatic",
billing_schedule,
customer.default_discount,
free_trial,
)
metadata = {
"billing_modality": billing_modality,
"billing_schedule": billing_schedule,
"licenses": licenses,
"license_management": license_management,
"price_per_license": price_per_license,
"seat_count": seat_count,
"type": "upgrade",
"user_email": user.delivery_email,
"realm_id": user.realm.id,
"realm_str": user.realm.string_id,
}
if free_trial:
if onboarding:
session_type = Session.FREE_TRIAL_UPGRADE_FROM_ONBOARDING_PAGE
else:
session_type = Session.FREE_TRIAL_UPGRADE_FROM_BILLING_PAGE
payment_intent = None
else:
session_type = Session.UPGRADE_FROM_BILLING_PAGE
stripe_payment_intent = stripe.PaymentIntent.create(
amount=price_per_license * licenses,
currency="usd",
customer=customer.stripe_customer_id,
description=f"Upgrade to Zulip Standard, ${price_per_license/100} x {licenses}",
receipt_email=user.delivery_email,
confirm=False,
statement_descriptor="Zulip Standard",
metadata=metadata,
)
payment_intent = PaymentIntent.objects.create(
customer=customer,
stripe_payment_intent_id=stripe_payment_intent.id,
status=PaymentIntent.get_status_integer_from_status_text(stripe_payment_intent.status),
)
stripe_session = stripe.checkout.Session.create(
cancel_url=f"{user.realm.uri}/upgrade/",
customer=customer.stripe_customer_id,
mode="setup",
payment_method_types=["card"],
metadata=metadata,
setup_intent_data={"metadata": metadata},
success_url=f"{user.realm.uri}/billing/event_status?stripe_session_id={{CHECKOUT_SESSION_ID}}",
)
session = Session.objects.create(
customer=customer, stripe_session_id=stripe_session.id, type=session_type
)
if payment_intent is not None:
session.payment_intent = payment_intent
session.save(update_fields=["payment_intent"])
return stripe_session
@require_organization_member
@has_request_variables
def upgrade(
request: HttpRequest,
user: UserProfile,
billing_modality: str = REQ(str_validator=check_string_in(VALID_BILLING_MODALITY_VALUES)),
schedule: str = REQ(str_validator=check_string_in(VALID_BILLING_SCHEDULE_VALUES)),
signed_seat_count: str = REQ(),
salt: str = REQ(),
onboarding: bool = REQ(default=False, json_validator=check_bool),
license_management: Optional[str] = REQ(
default=None, str_validator=check_string_in(VALID_LICENSE_MANAGEMENT_VALUES)
),
licenses: Optional[int] = REQ(json_validator=check_int, default=None),
) -> HttpResponse:
ensure_realm_does_not_have_active_plan(user.realm)
try:
seat_count = unsign_seat_count(signed_seat_count, salt)
if billing_modality == "charge_automatically" and license_management == "automatic":
licenses = seat_count
if billing_modality == "send_invoice":
schedule = "annual"
license_management = "manual"
check_upgrade_parameters(
billing_modality, schedule, license_management, licenses, seat_count
)
assert licenses is not None and license_management is not None
automanage_licenses = license_management == "automatic"
charge_automatically = billing_modality == "charge_automatically"
billing_schedule = {"annual": CustomerPlan.ANNUAL, "monthly": CustomerPlan.MONTHLY}[
schedule
]
if charge_automatically:
stripe_checkout_session = setup_upgrade_checkout_session_and_payment_intent(
user,
seat_count,
licenses,
license_management,
billing_schedule,
billing_modality,
onboarding,
)
return json_success(
data={
"stripe_session_url": stripe_checkout_session.url,
"stripe_session_id": stripe_checkout_session.id,
}
)
else:
process_initial_upgrade(
user,
licenses,
automanage_licenses,
billing_schedule,
False,
is_free_trial_offer_enabled(),
)
return json_success(data={})
except BillingError as e:
billing_logger.warning(
"BillingError during upgrade: %s. user=%s, realm=%s (%s), billing_modality=%s, "
"schedule=%s, license_management=%s, licenses=%s",
e.error_description,
user.id,
user.realm.id,
user.realm.string_id,
billing_modality,
schedule,
license_management,
licenses,
)
raise e
except Exception:
billing_logger.exception("Uncaught exception in billing:", stack_info=True)
error_message = BillingError.CONTACT_SUPPORT.format(email=settings.ZULIP_ADMINISTRATOR)
error_description = "uncaught exception during upgrade"
raise BillingError(error_description, error_message)
@zulip_login_required
@has_request_variables
def initial_upgrade(
request: HttpRequest, onboarding: bool = REQ(default=False, json_validator=check_bool)
) -> HttpResponse:
user = request.user
assert user.is_authenticated
if not settings.BILLING_ENABLED or user.is_guest:
return render(request, "404.html", status=404)
billing_page_url = reverse(billing_home)
customer = get_customer_by_realm(user.realm)
if customer is not None and (
get_current_plan_by_customer(customer) is not None or customer.sponsorship_pending
):
if onboarding:
billing_page_url = f"{billing_page_url}?onboarding=true"
return HttpResponseRedirect(billing_page_url)
if is_sponsored_realm(user.realm):
return HttpResponseRedirect(billing_page_url)
percent_off = Decimal(0)
if customer is not None and customer.default_discount is not None:
percent_off = customer.default_discount
seat_count = get_latest_seat_count(user.realm)
signed_seat_count, salt = sign_string(str(seat_count))
context: Dict[str, Any] = {
"realm": user.realm,
"email": user.delivery_email,
"seat_count": seat_count,
"signed_seat_count": signed_seat_count,
"salt": salt,
"min_invoiced_licenses": max(seat_count, MIN_INVOICED_LICENSES),
"default_invoice_days_until_due": DEFAULT_INVOICE_DAYS_UNTIL_DUE,
"plan": "Zulip Standard",
"free_trial_days": settings.FREE_TRIAL_DAYS,
"onboarding": onboarding,
"page_params": {
"seat_count": seat_count,
"annual_price": 8000,
"monthly_price": 800,
"percent_off": float(percent_off),
},
"realm_org_type": user.realm.org_type,
"sorted_org_types": sorted(
(
[org_type_name, org_type]
for (org_type_name, org_type) in Realm.ORG_TYPES.items()
if not org_type.get("hidden")
),
key=lambda d: d[1]["display_order"],
),
}
response = render(request, "corporate/upgrade.html", context=context)
return response
class SponsorshipRequestForm(forms.Form):
website = forms.URLField(max_length=ZulipSponsorshipRequest.MAX_ORG_URL_LENGTH, required=False)
organization_type = forms.IntegerField()
description = forms.CharField(widget=forms.Textarea)
@require_organization_member
@has_request_variables
def sponsorship(
request: HttpRequest,
user: UserProfile,
organization_type: str = REQ("organization-type"),
website: str = REQ(),
description: str = REQ(),
) -> HttpResponse:
realm = user.realm
requested_by = user.full_name
user_role = user.get_role_name()
support_url = get_support_url(realm)
post_data = request.POST.copy()
# We need to do this because the field name in the template
# for organization type contains a hyphen and the form expects
# an underscore.
post_data.update(organization_type=organization_type)
form = SponsorshipRequestForm(post_data)
if form.is_valid():
with transaction.atomic():
sponsorship_request = ZulipSponsorshipRequest(
realm=realm,
requested_by=user,
org_website=form.cleaned_data["website"],
org_description=form.cleaned_data["description"],
org_type=form.cleaned_data["organization_type"],
)
sponsorship_request.save()
org_type = form.cleaned_data["organization_type"]
if realm.org_type != org_type:
realm.org_type = org_type
realm.save(update_fields=["org_type"])
update_sponsorship_status(realm, True, acting_user=user)
do_make_user_billing_admin(user)
org_type_display_name = get_org_type_display_name(org_type)
context = {
"requested_by": requested_by,
"user_role": user_role,
"string_id": realm.string_id,
"support_url": support_url,
"organization_type": org_type_display_name,
"website": website,
"description": description,
}
send_email(
"zerver/emails/sponsorship_request",
to_emails=[FromAddress.SUPPORT],
from_name="Zulip sponsorship",
from_address=FromAddress.tokenized_no_reply_address(),
reply_to_email=user.delivery_email,
context=context,
)
return json_success()
else:
messages = []
for error_list in form.errors.get_json_data().values():
for error in error_list:
messages.append(error["message"])
message = " ".join(messages)
raise BillingError("Form validation error", message=message)
| 36.433243
| 103
| 0.676913
|
7d879923f603ebb23e1c3d59a5fb8523ac78ca54
| 2,609
|
py
|
Python
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/experiments.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 3
|
2017-04-05T18:01:59.000Z
|
2019-05-03T14:15:31.000Z
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/experiments.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 6
|
2019-02-27T15:45:58.000Z
|
2021-01-12T15:18:50.000Z
|
galaxy/coralsnp_reports/lib/galaxy/webapps/coralsnp_reports/controllers/experiments.py
|
skitchen19/galaxy_tools
|
b935f36cfe430263564503ebb71f78dc79315acb
|
[
"MIT"
] | 2
|
2018-10-26T18:36:39.000Z
|
2019-01-28T15:12:39.000Z
|
import logging
import sqlalchemy as sa
from markupsafe import escape
import galaxy.model
from galaxy import util
from . import BaseUIController
from galaxy.web.base.controller import web
from galaxy.webapps.reports.controllers.query import ReportQueryBuilder
log = logging.getLogger(__name__)
class Experiments(BaseUIController, ReportQueryBuilder):
@web.expose
def all(self, trans, **kwd):
message = escape(util.restore_text(kwd.get('message', '')))
q = sa.select((galaxy.model.corals.Experiment.table.c.id,
galaxy.model.corals.Experiment.table.c.seq_facility,
galaxy.model.corals.Experiment.table.c.array_version,
galaxy.model.corals.Experiment.table.c.result_folder_name,
galaxy.model.corals.Experiment.table.c.plate_barcode),
from_obj=[galaxy.model.corals.Experiment.table],
order_by=[galaxy.model.corals.Experiment.table.c.id])
experiments = []
for row in q.execute():
cols_tup = (row.id, row.seq_facility, row.array_version,
row.result_folder_name, row.plate_barcode)
experiments.append(cols_tup)
return trans.fill_template('/webapps/coralsnp_reports/experiments.mako', experiments=experiments, message=message)
@web.expose
def of_sample(self, trans, **kwd):
message = escape(util.restore_text(kwd.get('message', '')))
affy_id = kwd.get('affy_id')
experiment_id = kwd.get('experiment_id')
q = sa.select((galaxy.model.corals.Experiment.table.c.seq_facility,
galaxy.model.corals.Experiment.table.c.array_version,
galaxy.model.corals.Experiment.table.c.result_folder_name,
galaxy.model.corals.Experiment.table.c.plate_barcode),
whereclause=sa.and_(galaxy.model.corals.Experiment.table.c.id == experiment_id),
from_obj=[galaxy.model.corals.Experiment.table],
order_by=[galaxy.model.corals.Experiment.table.c.id])
experiments = []
for row in q.execute():
cols_tup = (row.seq_facility, row.array_version,
row.result_folder_name, row.plate_barcode)
experiments.append(cols_tup)
return trans.fill_template('/webapps/coralsnp_reports/experiment_of_sample.mako',
affy_id=affy_id,
experiments=experiments,
message=message)
| 47.436364
| 122
| 0.626677
|
f8965df973078ab9eeae0b327ad5828a2b05ccff
| 3,598
|
py
|
Python
|
naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_positionInterpolations.py
|
applejenny66/docker_pepper
|
2469cc4db6585161a31ac44c8fcf2605d71318b1
|
[
"MIT"
] | null | null | null |
naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_positionInterpolations.py
|
applejenny66/docker_pepper
|
2469cc4db6585161a31ac44c8fcf2605d71318b1
|
[
"MIT"
] | null | null | null |
naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_positionInterpolations.py
|
applejenny66/docker_pepper
|
2469cc4db6585161a31ac44c8fcf2605d71318b1
|
[
"MIT"
] | 1
|
2020-10-06T07:44:12.000Z
|
2020-10-06T07:44:12.000Z
|
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use positionInterpolations Method"""
import qi
import argparse
import sys
import almath
import motion
def main(session):
"""
This example uses the positionInterpolations method.
"""
# Get the services ALMotion & ALRobotPosture.
motion_service = session.service("ALMotion")
posture_service = session.service("ALRobotPosture")
# Wake up robot
motion_service.wakeUp()
# Send robot to Pose Init
posture_service.goToPosture("StandInit", 0.5)
# Example showing how to use positionInterpolations
frame = motion.FRAME_ROBOT
useSensorValues = False
dx = 0.03 # translation axis X (meters)
dy = 0.04 # translation axis Y (meters)
# Motion of Arms with block process
effectorList = []
pathList = []
axisMaskList = [motion.AXIS_MASK_VEL, motion.AXIS_MASK_VEL]
timeList = [[1.0], [1.0]] # seconds
effectorList.append("LArm")
currentPos = motion_service.getPosition("LArm", frame, useSensorValues)
targetPos = almath.Position6D(currentPos)
targetPos.y -= dy
pathList.append(list(targetPos.toVector()))
effectorList.append("RArm")
currentPos = motion_service.getPosition("RArm", frame, useSensorValues)
targetPos = almath.Position6D(currentPos)
targetPos.y += dy
pathList.append(list(targetPos.toVector()))
motion_service.positionInterpolations(effectorList, frame, pathList,
axisMaskList, timeList)
# Motion of Arms and Torso with block process
axisMaskList = [motion.AXIS_MASK_VEL,
motion.AXIS_MASK_VEL,
motion.AXIS_MASK_ALL]
timeList = [[4.0],
[4.0],
[1.0, 2.0, 3.0, 4.0]] # seconds
effectorList = []
pathList = []
effectorList.append("LArm")
pathList.append([motion_service.getPosition("LArm", frame, useSensorValues)])
effectorList.append("RArm")
pathList.append([motion_service.getPosition("RArm", frame, useSensorValues)])
effectorList.append("Torso")
torsoList = []
currentPos = motion_service.getPosition("Torso", frame, useSensorValues)
targetPos = almath.Position6D(currentPos)
targetPos.y += dy
torsoList.append(list(targetPos.toVector()))
targetPos = almath.Position6D(currentPos)
targetPos.x -= dx
torsoList.append(list(targetPos.toVector()))
targetPos = almath.Position6D(currentPos)
targetPos.y -= dy
torsoList.append(list(targetPos.toVector()))
targetPos = almath.Position6D(currentPos)
torsoList.append(list(targetPos.toVector()))
pathList.append(torsoList)
motion_service.positionInterpolations(effectorList, frame, pathList,
axisMaskList, timeList)
# Go to rest position
motion_service.rest()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session)
| 31.017241
| 98
| 0.645081
|
8187046cb6f81f6ccbc026a35e4b290023585e85
| 5,039
|
py
|
Python
|
kubernetes_asyncio/client/models/policy_v1beta1_run_as_group_strategy_options.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/policy_v1beta1_run_as_group_strategy_options.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/policy_v1beta1_run_as_group_strategy_options.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class PolicyV1beta1RunAsGroupStrategyOptions(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'ranges': 'list[PolicyV1beta1IDRange]',
'rule': 'str'
}
attribute_map = {
'ranges': 'ranges',
'rule': 'rule'
}
def __init__(self, ranges=None, rule=None, local_vars_configuration=None): # noqa: E501
"""PolicyV1beta1RunAsGroupStrategyOptions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._ranges = None
self._rule = None
self.discriminator = None
if ranges is not None:
self.ranges = ranges
self.rule = rule
@property
def ranges(self):
"""Gets the ranges of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs. # noqa: E501
:return: The ranges of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
:rtype: list[PolicyV1beta1IDRange]
"""
return self._ranges
@ranges.setter
def ranges(self, ranges):
"""Sets the ranges of this PolicyV1beta1RunAsGroupStrategyOptions.
ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs. # noqa: E501
:param ranges: The ranges of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
:type: list[PolicyV1beta1IDRange]
"""
self._ranges = ranges
@property
def rule(self):
"""Gets the rule of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
rule is the strategy that will dictate the allowable RunAsGroup values that may be set. # noqa: E501
:return: The rule of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
:rtype: str
"""
return self._rule
@rule.setter
def rule(self, rule):
"""Sets the rule of this PolicyV1beta1RunAsGroupStrategyOptions.
rule is the strategy that will dictate the allowable RunAsGroup values that may be set. # noqa: E501
:param rule: The rule of this PolicyV1beta1RunAsGroupStrategyOptions. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and rule is None: # noqa: E501
raise ValueError("Invalid value for `rule`, must not be `None`") # noqa: E501
self._rule = rule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyV1beta1RunAsGroupStrategyOptions):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PolicyV1beta1RunAsGroupStrategyOptions):
return True
return self.to_dict() != other.to_dict()
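# Illustrative usage sketch (not part of the generated file): construct the model
# with a required rule and inspect its dict form. "MustRunAs" is only an example
# value; the allowed strategies are defined by the Kubernetes policy API itself.
if __name__ == "__main__":
    opts = PolicyV1beta1RunAsGroupStrategyOptions(rule="MustRunAs")
    print(opts.to_dict())  # e.g. {'ranges': None, 'rule': 'MustRunAs'}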
| 33.151316
| 197
| 0.622941
|
8c7baad9f3d7c267002edf4d5def811abb0b1973
| 380
|
py
|
Python
|
cursoemvideo/python/exercicio/039(alistamentoMilitar).py
|
mateusjustino/cursos
|
10927bf62f89b5847bb0acd998e9e9191472d0f4
|
[
"MIT"
] | null | null | null |
cursoemvideo/python/exercicio/039(alistamentoMilitar).py
|
mateusjustino/cursos
|
10927bf62f89b5847bb0acd998e9e9191472d0f4
|
[
"MIT"
] | null | null | null |
cursoemvideo/python/exercicio/039(alistamentoMilitar).py
|
mateusjustino/cursos
|
10927bf62f89b5847bb0acd998e9e9191472d0f4
|
[
"MIT"
] | null | null | null |
from datetime import date
atual = date.today().year
nascimento = int(input('Enter your year of birth: '))
idade = atual - nascimento
if idade < 18:
    print('You still have {} years until you can enlist'.format(18 - idade))
elif idade == 18:
    print('You are 18 years old, it is time to enlist')
else:
    print('Your enlistment was {} years ago'.format(idade - 18))
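# Worked example (added for clarity): if the current year is 2022 and the birth
# year entered is 2000, then idade = 22, so the final branch prints that the
# enlistment was 22 - 18 = 4 years ago.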
| 31.666667
| 72
| 0.689474
|
05a40d144a083825177179ba8563edec71ed3a93
| 2,857
|
py
|
Python
|
Homework/week7/Base/DataProcessing.py
|
zhufyaxel/ML_SaltyFish
|
84b839fa236c471e1fa8600093f0096ff79e4097
|
[
"MIT"
] | null | null | null |
Homework/week7/Base/DataProcessing.py
|
zhufyaxel/ML_SaltyFish
|
84b839fa236c471e1fa8600093f0096ff79e4097
|
[
"MIT"
] | null | null | null |
Homework/week7/Base/DataProcessing.py
|
zhufyaxel/ML_SaltyFish
|
84b839fa236c471e1fa8600093f0096ff79e4097
|
[
"MIT"
] | null | null | null |
import os, json
from keras.preprocessing.text import Tokenizer
import _pickle as pk
from keras.preprocessing.sequence import pad_sequences
import numpy as np
from keras.utils import to_categorical
strProjectFolder = os.path.dirname(os.path.dirname(__file__))
strRAWDataFolder = os.path.join(strProjectFolder, "01-RAWData")
strAPDataFolder = os.path.join(strProjectFolder, "02-APData")
class executeETL():
def __init__(self):
self.dictData = {}
def cleanData(self, strDataFileName, boolLabel):
listLabel = []
listText = []
with open(os.path.join(strRAWDataFolder, strDataFileName), "r", encoding="utf8") as data:
for d in data:
if boolLabel:
listRow = d.strip().split(" +++$+++ ")
listLabel.append(int(listRow[0]))
listText.append(listRow[1])
else:
listRow = d.strip().split(",", 1)[1]
if listRow != "text":
listText.append(listRow)
if boolLabel:
self.dictData["Data"] = [listText, listLabel]
else:
self.dictData["Data"] = [listText]
def doTokenizer(self, intVocabSize):
self.tokenizer = Tokenizer(num_words=intVocabSize)
for key in self.dictData:
listTexts = self.dictData[key][0]
self.tokenizer.fit_on_texts(listTexts)
def saveTokenizer(self, strTokenizerFileName):
pk.dump(self.tokenizer, open(os.path.join(strAPDataFolder, strTokenizerFileName), "wb"))
def loadTokenizer(self, strTokenizerFileName):
self.tokenizer = pk.load(open(os.path.join(strAPDataFolder, strTokenizerFileName), "rb"))
def convertWords2Sequence(self, intSequenceLength):
for key in self.dictData:
listSequence = self.tokenizer.texts_to_sequences(self.dictData[key][0])
print("text count start")
listTextCount = []
for t in listSequence:
listTextCount.append(len(t))
import pandas as pd
print(pd.Series(listTextCount).value_counts())
self.dictData[key][0] = np.array(pad_sequences(listSequence, maxlen=intSequenceLength))
def convertLabel2Onehot(self):
for key in self.dictData:
if len(self.dictData[key]) == 2:
self.dictData[key][1] = np.array(to_categorical(self.dictData[key][1]))
def splitData(self, floatRatio):
data = self.dictData["Data"]
X = data[0]
Y = data[1]
intDataSize = len(X)
intValidationSize = int(intDataSize * floatRatio)
return (X[intValidationSize:], Y[intValidationSize:]), (X[:intValidationSize], Y[:intValidationSize])
def getData(self):
return self.dictData["Data"]
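# Illustrative usage sketch (not part of the original module); the file name,
# vocabulary size, sequence length and split ratio below are assumptions made
# purely for demonstration.
if __name__ == "__main__":
    etl = executeETL()
    etl.cleanData("training_label.txt", boolLabel=True)
    etl.doTokenizer(intVocabSize=20000)
    etl.saveTokenizer("Tokenizer.pkl")
    etl.convertWords2Sequence(intSequenceLength=40)
    etl.convertLabel2Onehot()
    (X_train, Y_train), (X_valid, Y_valid) = etl.splitData(floatRatio=0.1)
    print(X_train.shape, Y_train.shape, X_valid.shape, Y_valid.shape)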
| 37.103896
| 109
| 0.610431
|
61bac956eafe57741762451a28f5fad275e1fd04
| 5,742
|
py
|
Python
|
cardea/fhir/Dosage.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 69
|
2021-01-28T22:25:10.000Z
|
2022-03-15T00:23:33.000Z
|
cardea/fhir/Dosage.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 30
|
2018-08-29T12:45:23.000Z
|
2019-12-24T11:08:12.000Z
|
cardea/fhir/Dosage.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 14
|
2021-03-24T01:21:25.000Z
|
2022-03-12T11:53:40.000Z
|
from .fhirbase import fhirbase
class Dosage(fhirbase):
"""
Indicates how the medication is/was taken or should be taken by the
patient.
Args:
sequence: Indicates the order in which the dosage instructions should
be applied or interpreted.
text: Free text dosage instructions e.g. SIG.
additionalInstruction: Supplemental instruction - e.g. "with meals".
patientInstruction: Instructions in terms that are understood by the
patient or consumer.
timing: When medication should be administered.
asNeededBoolean: Indicates whether the Medication is only taken when
needed within a specific dosing schedule (Boolean option), or it
indicates the precondition for taking the Medication
(CodeableConcept).
asNeededCodeableConcept: Indicates whether the Medication is only
taken when needed within a specific dosing schedule (Boolean option),
or it indicates the precondition for taking the Medication
(CodeableConcept).
site: Body site to administer to.
route: How drug should enter body.
method: Technique for administering medication.
doseRange: Amount of medication per dose.
doseSimpleQuantity: Amount of medication per dose.
maxDosePerPeriod: Upper limit on medication per unit of time.
maxDosePerAdministration: Upper limit on medication per
administration.
maxDosePerLifetime: Upper limit on medication per lifetime of the
patient.
rateRatio: Amount of medication per unit of time.
rateRange: Amount of medication per unit of time.
rateSimpleQuantity: Amount of medication per unit of time.
"""
__name__ = 'Dosage'
def __init__(self, dict_values=None):
self.sequence = None
# type: int
self.text = None
# type: str
self.additionalInstruction = None
# type: list
# reference to CodeableConcept
self.patientInstruction = None
# type: str
self.timing = None
# reference to Timing
self.asNeededBoolean = None
# type: bool
self.asNeededCodeableConcept = None
# reference to CodeableConcept
self.site = None
# reference to CodeableConcept
self.route = None
# reference to CodeableConcept
self.method = None
# reference to CodeableConcept
self.doseRange = None
# reference to Range
self.doseSimpleQuantity = None
# reference to Quantity
self.maxDosePerPeriod = None
# reference to Ratio
self.maxDosePerAdministration = None
# reference to Quantity
self.maxDosePerLifetime = None
# reference to Quantity
self.rateRatio = None
# reference to Ratio
self.rateRange = None
# reference to Range
self.rateSimpleQuantity = None
# reference to Quantity
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'doseRange'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'asNeededCodeableConcept'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'maxDosePerPeriod'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'site'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'rateSimpleQuantity'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'rateRatio'},
{'parent_entity': 'Timing',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'timing'},
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'rateRange'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'doseSimpleQuantity'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'maxDosePerLifetime'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'method'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'maxDosePerAdministration'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'route'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Dosage',
'child_variable': 'additionalInstruction'},
]
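# Illustrative usage sketch (not part of the original file), assuming that
# fhirbase.set_attributes simply copies matching dictionary keys onto the
# instance; the field values below are made up.
if __name__ == "__main__":
    example = Dosage(dict_values={'sequence': 1,
                                  'text': 'One tablet by mouth daily',
                                  'patientInstruction': 'Take with food'})
    print(example.text)
    print(len(example.get_relationships()), 'relationship mappings defined')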
| 32.440678
| 81
| 0.5876
|
2a190a5f6a30e8d150689861e28a71d781fdee7e
| 12,932
|
py
|
Python
|
salt/states/netntp.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 2
|
2015-06-18T19:07:20.000Z
|
2017-09-27T18:54:29.000Z
|
salt/states/netntp.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-04-15T22:17:42.000Z
|
2016-03-22T08:46:27.000Z
|
salt/states/netntp.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 5
|
2017-06-16T23:48:13.000Z
|
2021-04-08T17:43:48.000Z
|
# -*- coding: utf-8 -*-
'''
Network NTP
===========
.. versionadded: 2016.11.0
Manage the configuration of NTP peers and servers on the network devices through the NAPALM proxy.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net> & Jerome Fleury <jf@cloudflare.com>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- Requires netaddr_ to be installed: `pip install netaddr` to check if IP
Addresses are correctly specified
- Requires dnspython_ to be installed: `pip install dnspython` to resolve the
nameserver entities (in case the user does not configure the peers/servers
using their IP addresses)
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`NTP operational and configuration management module <salt.modules.napalm_ntp>`
.. _netaddr: https://pythonhosted.org/netaddr/
.. _dnspython: http://www.dnspython.org/
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import 3rd-party libs
from salt.ext import six
# import NAPALM utils
import salt.utils.napalm
try:
from netaddr import IPAddress
from netaddr.core import AddrFormatError
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
try:
import dns.resolver
HAS_DNSRESOLVER = True
except ImportError:
HAS_DNSRESOLVER = False
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = 'netntp'
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
'''
NAPALM library must be installed for this module to work and run in a (proxy) minion.
'''
return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _default_ret(name):
ret = {
'name': name,
'changes': {},
'result': False,
'comment': ''
}
return ret
def _retrieve_ntp_peers():
'''Retrieves configured NTP peers'''
return __salt__['ntp.peers']()
def _retrieve_ntp_servers():
'''Retrieves configured NTP servers'''
return __salt__['ntp.servers']()
def _check(peers):
'''Checks whether the input is a valid list of peers and transforms domain names into IP Addresses'''
if not isinstance(peers, list):
return False
for peer in peers:
if not isinstance(peer, six.string_types):
return False
if not HAS_NETADDR: # if does not have this lib installed, will simply try to load what user specified
        # if the addresses are not correctly specified, it will throw an error when loading the actual config
return True
ip_only_peers = []
for peer in peers:
try:
ip_only_peers.append(six.text_type(IPAddress(peer))) # append the str value
except AddrFormatError:
# if not a valid IP Address
# will try to see if it is a nameserver and resolve it
if not HAS_DNSRESOLVER:
continue # without the dns resolver cannot populate the list of NTP entities based on their nameserver
# so we'll move on
dns_reply = []
try:
# try to see if it is a valid NS
dns_reply = dns.resolver.query(peer)
except dns.resolver.NoAnswer:
                # not a valid DNS entry either
return False
for dns_ip in dns_reply:
ip_only_peers.append(six.text_type(dns_ip))
peers = ip_only_peers
return True
def _clean(lst):
return [elem for elem in lst if elem]
def _set_ntp_peers(peers):
'''Calls ntp.set_peers.'''
return __salt__['ntp.set_peers'](*peers, commit=False)
def _set_ntp_servers(servers):
'''Calls ntp.set_servers.'''
return __salt__['ntp.set_servers'](*servers, commit=False)
def _delete_ntp_peers(peers):
'''Calls ntp.delete_peers.'''
return __salt__['ntp.delete_peers'](*peers, commit=False)
def _delete_ntp_servers(servers):
'''Calls ntp.delete_servers.'''
return __salt__['ntp.delete_servers'](*servers, commit=False)
def _exec_fun(name, *kargs):
if name in list(globals().keys()):
return globals().get(name)(*kargs)
return None
def _check_diff_and_configure(fun_name, peers_servers, name='peers'):
_ret = _default_ret(fun_name)
_options = ['peers', 'servers']
if name not in _options:
return _ret
_retrieve_fun = '_retrieve_ntp_{what}'.format(what=name)
ntp_list_output = _exec_fun(_retrieve_fun) # contains only IP Addresses as dictionary keys
if ntp_list_output.get('result', False) is False:
_ret['comment'] = 'Cannot retrieve NTP {what} from the device: {reason}'.format(
what=name,
reason=ntp_list_output.get('comment')
)
return _ret
configured_ntp_list = set(ntp_list_output.get('out', {}))
desired_ntp_list = set(peers_servers)
if configured_ntp_list == desired_ntp_list:
_ret.update({
'comment': 'NTP {what} already configured as needed.'.format(
what=name
),
'result': True
})
return _ret
list_to_set = list(desired_ntp_list - configured_ntp_list)
list_to_delete = list(configured_ntp_list - desired_ntp_list)
list_to_set = _clean(list_to_set)
list_to_delete = _clean(list_to_delete)
changes = {}
if list_to_set:
changes['added'] = list_to_set
if list_to_delete:
changes['removed'] = list_to_delete
_ret.update({
'changes': changes
})
if __opts__['test'] is True:
_ret.update({
'result': None,
'comment': 'Testing mode: configuration was not changed!'
})
return _ret
# <---- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
# ----- Call _set_ntp_peers and _delete_ntp_peers as needed ------------------------------------------------------->
expected_config_change = False
successfully_changed = True
comment = ''
if list_to_set:
_set_fun = '_set_ntp_{what}'.format(what=name)
_set = _exec_fun(_set_fun, list_to_set)
if _set.get('result'):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += 'Cannot set NTP {what}: {reason}'.format(
what=name,
reason=_set.get('comment')
)
if list_to_delete:
_delete_fun = '_delete_ntp_{what}'.format(what=name)
_removed = _exec_fun(_delete_fun, list_to_delete)
if _removed.get('result'):
expected_config_change = True
else: # something went wrong...
successfully_changed = False
comment += 'Cannot remove NTP {what}: {reason}'.format(
what=name,
reason=_removed.get('comment')
)
_ret.update({
'successfully_changed': successfully_changed,
'expected_config_change': expected_config_change,
'comment': comment
})
return _ret
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, peers=None, servers=None):
'''
Manages the configuration of NTP peers and servers on the device, as specified in the state SLS file.
NTP entities not specified in these lists will be removed whilst entities not configured on the device will be set.
SLS Example:
.. code-block:: yaml
netntp_example:
netntp.managed:
- peers:
- 192.168.0.1
- 172.17.17.1
- servers:
- 24.124.0.251
- 138.236.128.36
Output example:
.. code-block:: python
{
'edge01.nrt04': {
'netntp_|-netntp_example_|-netntp_example_|-managed': {
'comment': 'NTP servers already configured as needed.',
'name': 'netntp_example',
'start_time': '12:45:24.056659',
'duration': 2938.857,
'changes': {
'peers': {
'removed': [
'192.168.0.2',
'192.168.0.3'
],
'added': [
'192.168.0.1',
'172.17.17.1'
]
}
},
'result': None
}
}
}
'''
ret = _default_ret(name)
result = ret.get('result', False)
comment = ret.get('comment', '')
changes = ret.get('changes', {})
    if not (isinstance(peers, list) or isinstance(servers, list)):  # neither of them is a list
return ret # just exit
if isinstance(peers, list) and not _check(peers): # check and clean peers
ret['comment'] = 'NTP peers must be a list of valid IP Addresses or Domain Names'
return ret
if isinstance(servers, list) and not _check(servers): # check and clean servers
ret['comment'] = 'NTP servers must be a list of valid IP Addresses or Domain Names'
return ret
# ----- Retrieve existing NTP peers and determine peers to be added/removed --------------------------------------->
successfully_changed = True
expected_config_change = False
if isinstance(peers, list):
_peers_ret = _check_diff_and_configure(name, peers, name='peers')
expected_config_change = _peers_ret.get('expected_config_change', False)
successfully_changed = _peers_ret.get('successfully_changed', True)
result = result and _peers_ret.get('result', False)
comment += ('\n' + _peers_ret.get('comment', ''))
_changed_peers = _peers_ret.get('changes', {})
if _changed_peers:
changes['peers'] = _changed_peers
if isinstance(servers, list):
_servers_ret = _check_diff_and_configure(name, servers, name='servers')
expected_config_change = expected_config_change or _servers_ret.get('expected_config_change', False)
successfully_changed = successfully_changed and _servers_ret.get('successfully_changed', True)
result = result and _servers_ret.get('result', False)
comment += ('\n' + _servers_ret.get('comment', ''))
_changed_servers = _servers_ret.get('changes', {})
if _changed_servers:
changes['servers'] = _changed_servers
ret.update({
'changes': changes
})
if not (changes or expected_config_change):
ret.update({
'result': True,
'comment': 'Device configured properly.'
})
return ret
if __opts__['test'] is True:
ret.update({
'result': None,
'comment': 'This is in testing mode, the device configuration was not changed!'
})
return ret
# <---- Call _set_ntp_peers and _delete_ntp_peers as needed --------------------------------------------------------
# ----- Try to commit changes ------------------------------------------------------------------------------------->
if expected_config_change: # commit only in case there's something to update
config_result, config_comment = __salt__['net.config_control']()
result = config_result and successfully_changed
comment += config_comment
# <---- Try to commit changes --------------------------------------------------------------------------------------
ret.update({
'result': result,
'comment': comment
})
return ret
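# Usage note (added for clarity, not part of the original module): assuming the
# SLS snippet from the docstring above is saved as e.g. ntp.sls, a dry run such as
#   salt 'edge01*' state.sls ntp test=True
# reports the peers/servers that would be added or removed under `changes`
# without committing anything, because of the __opts__['test'] branches above.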
| 31.541463
| 120
| 0.534797
|
14088831574ee8921007e4d6abcc368234d46f7f
| 5,751
|
py
|
Python
|
login.py
|
aidanMellin/p320_14
|
71d9a4028a7ccdaf5826ca0d723ec2b09f7cace0
|
[
"MIT"
] | null | null | null |
login.py
|
aidanMellin/p320_14
|
71d9a4028a7ccdaf5826ca0d723ec2b09f7cace0
|
[
"MIT"
] | null | null | null |
login.py
|
aidanMellin/p320_14
|
71d9a4028a7ccdaf5826ca0d723ec2b09f7cace0
|
[
"MIT"
] | null | null | null |
from logging import error
import psycopg2
import login
from sshtunnel import SSHTunnelForwarder
from os import getenv
from os.path import exists
from dotenv import load_dotenv
from datetime import datetime
def loginSequence(self):
print("\n\nWelcome to the gitBash Movie Database.\n")
while True:
print("\t===Login Menu===\n"
"[1]. Login to account\n"
"[2]. Create an account\n"
"[3]. Quit")
val = input("Choose an option by typing a number: ")
escape = False
if val in ('1', 'l', 'L', 'login', 'Login',):
while(escape != True):
username = input("Please enter your username: ")
password = input("Please enter your password: ")
self.username = username
try:
self.curs.execute(
"""
SELECT *
FROM \"user\"
WHERE username = %s AND password = %s
""",
[username, password,]
)
match = self.curs.fetchone()
if(match is not None):
escape = True
else:
print("Incorrect username or password.\n")
except (Exception) as error:
print("Something went wrong.\n", error)
self.curs.close()
self.conn.close()
adate = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
self.curs.execute(
"""
UPDATE \"user\"
SET accessdate = %s
WHERE username = %s
""",
[adate, username,]
)
self.curs.execute(
"""
INSERT INTO access_date (username, date)
VALUES(%s,%s)
""",
[username, adate]
)
self.conn.commit()
# print("Welcome, " + username)
except (Exception) as error:
print("Something went wrong.\n", error)
self.curs.close()
self.conn.close()
return username
elif val in ('2', 'c', 'C', 'create', 'Create'):
while(escape != True):
username = input("Please enter a username of 20 characters or less: ")
while (len(username) > 20):
username = input("That username was too long. Please enter a username of 20 characters or less: ")
self.username = username
try:
self.curs.execute(
"""
SELECT *
FROM \"user\"
WHERE username = %s
""",
[username]
)
match = self.curs.fetchone()
if(match is None):
escape = True
else:
print("This username is already in use. Please choose another.\n")
except (Exception) as error:
print("Something went wrong.\n", error)
self.curs.close()
self.conn.close()
escape = False
while(escape != True):
email = input("Please enter your email address: ")
try:
self.curs.execute(
"""
SELECT *
FROM \"user\"
WHERE email = %s
""",
[email]
)
match = self.curs.fetchone()
if(match is None):
escape = True
else:
print("This email is already in use. Please use another.\n")
except (Exception) as error:
print("Something went wrong.\n", error)
self.curs.close()
self.conn.close()
password = input("Please enter a password of 20 characters or less: ")
while (len(password) > 20):
password = input("That password was too long. Please enter a password of 20 characters or less: ")
fname = input("Please enter your first name: ")
lname = input("Please enter your last name: ")
cdate = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
self.curs.execute(
"""
INSERT INTO \"user\" (username, password, email, fname, lname, accessdate, createdate)
VALUES(%s,%s,%s,%s,%s,%s,%s)
""",
[username, password, email, fname, lname, cdate, cdate,]
)
self.curs.execute(
"""
INSERT INTO access_date (username, date)
VALUES(%s,%s)
""",
[username, cdate]
)
self.conn.commit()
except (Exception) as error:
print("Something went wrong.\n", error)
self.curs.close()
self.conn.close()
return username
elif val in ('3', 'q', 'Q', 'quit', 'Quit'):
return None
else:
print("Invalid choice. Please input a valid number.\n")
| 38.086093
| 118
| 0.408451
|
aab6c7d702d9a23134b3256f2297b3177231656d
| 4,858
|
py
|
Python
|
test/corfunc/utest_corfunc.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
test/corfunc/utest_corfunc.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
test/corfunc/utest_corfunc.py
|
llimeht/sasview
|
d0c10746a2397c5021ed8bbc842ba99243a9b0ac
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Unit Tests for CorfuncCalculator class
"""
from __future__ import division, print_function
import os.path
import unittest
import time
import numpy as np
from sas.sascalc.corfunc.corfunc_calculator import CorfuncCalculator
from sas.sascalc.dataloader.data_info import Data1D
def find(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
class TestCalculator(unittest.TestCase):
def setUp(self):
self.data = load_data()
# Note: to generate target values from the GUI:
# * load the data from test/corfunc/test/98929.txt
# * set qrange to (0, 0.013), (0.15, 0.24)
# * select fourier transform type
# * click Calculate Bg
# * click Extrapolate
# * click Compute Parameters
# * copy the Guinier and Porod values to the extrapolate function
# * for each graph, grab the data from DataInfo and store it in _out.txt
self.calculator = CorfuncCalculator(data=self.data, lowerq=0.013,
upperq=(0.15, 0.24))
self.calculator.background = 0.3
self.extrapolation = None
self.transformation = None
self.results = [np.loadtxt(find(filename+"_out.txt")).T[2]
for filename in ("gamma1", "gamma3", "idf")]
def extrapolate(self):
params, extrapolation, s2 = self.calculator.compute_extrapolation()
# Check the extrapolation parameters
self.assertAlmostEqual(params['A'], 4.18970, places=5)
self.assertAlmostEqual(params['B'], -25469.9, places=1)
self.assertAlmostEqual(params['K'], 4.44660e-5, places=10)
#self.assertAlmostEqual(params['sigma'], 1.70181e-10, places=15)
        # Ensure the extrapolation tends to the background value
self.assertAlmostEqual(extrapolation.y[-1], self.calculator.background)
# Test extrapolation for q values between 0.02 and 0.24
mask = np.logical_and(self.data.x > 0.02, self.data.x < 0.24)
qs = self.data.x[mask]
iqs = self.data.y[mask]
for q, iq in zip(qs, iqs):
            # Find the q value in the extrapolation nearest to the value in
# the data
q_extrap = min(extrapolation.x, key=lambda x:abs(x-q))
# Find the index of this value in the extrapolation
index = list(extrapolation.x).index(q_extrap)
            # Find its corresponding intensity value
iq_extrap = extrapolation.y[index]
            # Check the extrapolation agrees with the data at this point to 1 d.p.
self.assertAlmostEqual(iq_extrap, iq, 1)
self.extrapolation = extrapolation
def transform(self):
self.calculator.compute_transform(self.extrapolation, 'fourier',
completefn=self.transform_callback)
# Transform is performed asynchronously; give it time to run
while True:
time.sleep(0.001)
if (not self.calculator.transform_isrunning() and
self.transformation is not None):
break
transform1, transform3, idf = self.transformation
self.assertIsNotNone(transform1)
self.assertAlmostEqual(transform1.y[0], 1)
self.assertAlmostEqual(transform1.y[-1], 0, 5)
def transform_callback(self, transforms):
self.transformation = transforms
def extract_params(self):
params = self.calculator.extract_parameters(self.transformation[0])
self.assertIsNotNone(params)
self.assertEqual(len(params), 6)
self.assertLess(abs(params['max']-75), 2.5) # L_p ~= 75
def check_transforms(self):
gamma1, gamma3, idf = self.transformation
gamma1_out, gamma3_out, idf_out = self.results
def compare(a, b):
return max(abs((a-b)/b))
#print("gamma1 diff", compare(gamma1.y[gamma1.x<=200.], gamma1_out))
#print("gamma3 diff", compare(gamma3.y[gamma3.x<=200.], gamma3_out))
#print("idf diff", compare(idf.y[idf.x<=200.], idf_out))
#self.assertLess(compare(gamma1.y[gamma1.x<=200.], gamma1_out), 1e-10)
#self.assertLess(compare(gamma3.y[gamma3.x<=200.], gamma3_out), 1e-10)
#self.assertLess(compare(idf.y[idf.x<=200.], idf_out), 1e-10)
    # Ensure tests are run in the correct order;
# Each test depends on the one before it
def test_calculator(self):
steps = [self.extrapolate, self.transform, self.extract_params, self.check_transforms]
for test in steps:
try:
test()
except Exception as e:
                self.fail("{} failed ({}: {})".format(test, type(e), e))
def load_data(filename="98929.txt"):
data = np.loadtxt(find(filename), dtype=np.float64)
q = data[:,0]
iq = data[:,1]
return Data1D(x=q, y=iq)
if __name__ == '__main__':
unittest.main()
| 38.555556
| 94
| 0.635035
|
60af0b79990df8c9ba3bb4e99e6866ea49a54456
| 4,087
|
py
|
Python
|
IMU/VTK-6.2.0/IO/XML/Testing/Python/TestEmptyXML.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | null | null | null |
IMU/VTK-6.2.0/IO/XML/Testing/Python/TestEmptyXML.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | null | null | null |
IMU/VTK-6.2.0/IO/XML/Testing/Python/TestEmptyXML.py
|
timkrentz/SunTracker
|
9a189cc38f45e5fbc4e4c700d7295a871d022795
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# List of types and corresponding file extensions.
types = [[ 'ImageData', 'vti'],
['RectilinearGrid', 'vtr'],
['StructuredGrid', 'vts'],
['PolyData', 'vtp'],
['UnstructuredGrid', 'vtu']]
# We intentionally cause vtkErrorMacro calls to be made below. Dump
# errors to a file to prevent a window from coming up.
fow = vtk.vtkFileOutputWindow()
fow.SetFileName("TestEmptyXMLErrors.txt")
fow.SetFlush(0)
fow.SetInstance(fow)
# Prepare some test files.
f = open('emptyFile.vtk', 'wb')
f.close()
f = open('junkFile.vtk', 'wb')
f.write("v9np7598mapwcawoiur-,rjpmW9MJV28nun-q38ynq-9.8ugujqvt-8n3-nv8")
f.close()
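# (The loops below build VTK class names dynamically via eval(); an equivalent,
# eval-free spelling would be getattr(vtk, 'vtkXML' + type + 'Writer')().)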
# Test each writer/reader.
for t in types:
type = t[0]
ext = t[1]
input = eval('vtk.vtk' + type + '()')
writer = eval('vtk.vtkXML' + type + 'Writer()')
writer.SetFileName('empty' + type + '.' + ext)
sys.stdout.write('Attempting ' + type + ' write with no input.\n')
writer.Write()
sys.stdout.write('Attempting ' + type + ' write with empty input.\n')
writer.SetInputData(input)
writer.Write()
reader = eval('vtk.vtkXML' + type + 'Reader()')
reader.SetFileName('empty' + type + '.' + ext)
sys.stdout.write('Attempting read from file with empty ' + type + '.\n')
reader.Update()
pwriter = eval('vtk.vtkXMLP' + type + 'Writer()')
pwriter.SetFileName('emptyP' + type + '.p' + ext)
sys.stdout.write('Attempting P' + type + ' write with no input.\n')
pwriter.Write()
sys.stdout.write('Attempting P' + type + ' write with empty input.\n')
pwriter.SetInputData(input)
pwriter.Write()
preader = eval('vtk.vtkXMLP' + type + 'Reader()')
preader.SetFileName('emptyP' + type + '.p' + ext)
sys.stdout.write('Attempting read from file with empty P' + type + '.\n')
preader.Update()
reader.SetFileName("emptyFile.vtk")
preader.SetFileName("emptyFile.vtk")
sys.stdout.write('Attempting read ' + type + ' from empty file.\n')
reader.Update()
sys.stdout.write('Attempting read P' + type + ' from empty file.\n')
preader.Update()
reader.SetFileName("junkFile.vtk")
preader.SetFileName("junkFile.vtk")
sys.stdout.write('Attempting read ' + type + ' from junk file.\n')
reader.Update()
sys.stdout.write('Attempting read P' + type + ' from junk file.\n')
preader.Update()
del input
del writer
del reader
del pwriter
del preader
# Test the data set writers.
for t in types:
type = t[0]
ext = t[1]
writer = vtk.vtkXMLDataSetWriter()
pwriter = vtk.vtkXMLPDataSetWriter()
input = eval('vtk.vtk' + type + '()')
writer.SetFileName('empty' + type + 'DataSet.' + ext)
sys.stdout.write('Attempting DataSet ' + type + ' write with no input.\n')
writer.Write()
sys.stdout.write('Attempting DataSet ' + type + ' write with empty input.\n')
writer.SetInputData(input)
writer.Write()
pwriter.SetFileName('emptyP' + type + 'DataSet.p' + ext)
sys.stdout.write('Attempting DataSet ' + type + ' write with no input.\n')
pwriter.SetNumberOfPieces(1)
pwriter.Write()
sys.stdout.write('Attempting DataSet ' + type + ' write with empty input.\n')
pwriter.SetInputData(input)
pwriter.Write()
del input
del pwriter
del writer
# Done with the file output window.
fow.SetInstance(None)
del fow
# Delete the test files.
for t in types:
type = t[0]
ext = t[1]
os.remove('empty' + type + '.' + ext)
os.remove('empty' + type + 'DataSet.' + ext)
os.remove('emptyP' + type + '.p' + ext)
os.remove('emptyP' + type + '_0.' + ext)
os.remove('emptyP' + type + 'DataSet.p' + ext)
os.remove('emptyP' + type + 'DataSet_0.' + ext)
os.remove('junkFile.vtk')
os.remove('emptyFile.vtk')
os.remove('TestEmptyXMLErrors.txt')
| 31.198473
| 82
| 0.616589
|
91c99185da5c920a02efcc3fd95009c189c51387
| 224
|
py
|
Python
|
app.py
|
polxpolx/sample-flask
|
2cc263b9c78679b2c5e23a9787d381d317088b93
|
[
"MIT"
] | null | null | null |
app.py
|
polxpolx/sample-flask
|
2cc263b9c78679b2c5e23a9787d381d317088b93
|
[
"MIT"
] | null | null | null |
app.py
|
polxpolx/sample-flask
|
2cc263b9c78679b2c5e23a9787d381d317088b93
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/")
def hello_world():
return render_template("index.html")
if __name__ == "__main__":
Schema()
app.run(debug=True)
| 16
| 40
| 0.700893
|
e7db57cf8b5ade9734ba216ba53810e7afaf098f
| 550
|
py
|
Python
|
pyteal/ir/__init__.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | null | null | null |
pyteal/ir/__init__.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | 1
|
2022-03-04T14:57:57.000Z
|
2022-03-04T14:57:57.000Z
|
pyteal/ir/__init__.py
|
CiottiGiorgio/pyteal
|
9646a1aa479786c1e80d6a3821d5db1e6c4a16e2
|
[
"MIT"
] | null | null | null |
from pyteal.ir.ops import Op, Mode
from pyteal.ir.tealcomponent import TealComponent
from pyteal.ir.tealop import TealOp
from pyteal.ir.teallabel import TealLabel
from pyteal.ir.tealblock import TealBlock
from pyteal.ir.tealsimpleblock import TealSimpleBlock
from pyteal.ir.tealconditionalblock import TealConditionalBlock
from pyteal.ir.labelref import LabelReference
__all__ = [
"Op",
"Mode",
"TealComponent",
"TealOp",
"TealLabel",
"TealBlock",
"TealSimpleBlock",
"TealConditionalBlock",
"LabelReference",
]
| 23.913043
| 63
| 0.76
|
375283861e2b5554d87d17c70c275c8bbb39de93
| 444
|
py
|
Python
|
pycreate2/__init__.py
|
ebrukamis/pycreate2
|
20e91d61437d546bce5a3a9b2b8129633fe6e6f6
|
[
"MIT"
] | 45
|
2017-07-16T14:52:54.000Z
|
2022-03-11T16:23:57.000Z
|
pycreate2/__init__.py
|
ebrukamis/pycreate2
|
20e91d61437d546bce5a3a9b2b8129633fe6e6f6
|
[
"MIT"
] | 15
|
2017-07-16T20:52:18.000Z
|
2021-05-04T08:46:59.000Z
|
pycreate2/__init__.py
|
walchko/pycreate2
|
4bb0d86cacb3c57c3abdc99e51a0467628c94f66
|
[
"MIT"
] | 38
|
2017-07-03T09:39:22.000Z
|
2022-03-24T23:39:10.000Z
|
##############################################
# The MIT License (MIT)
# Copyright (c) 2017 Kevin Walchko
# see LICENSE for full details
##############################################
try:
from importlib.metadata import version # type: ignore
except ImportError:
from importlib_metadata import version # type: ignore
from .create2api import Create2
__license__ = 'MIT'
__author__ = 'Kevin Walchko'
__version__ = version("pycreate2")
| 26.117647
| 57
| 0.596847
|
dccc03ee8c57bed7b88eb1ace6853a0d0f34553a
| 132
|
py
|
Python
|
src/secrets_manager/warnings.py
|
CuriBio/secrets-manager
|
b09551874bf5c5aaeda9c21669a90162bf3f0ca9
|
[
"MIT"
] | null | null | null |
src/secrets_manager/warnings.py
|
CuriBio/secrets-manager
|
b09551874bf5c5aaeda9c21669a90162bf3f0ca9
|
[
"MIT"
] | 37
|
2020-08-12T20:39:46.000Z
|
2022-03-30T20:03:20.000Z
|
src/secrets_manager/warnings.py
|
CuriBio/secrets-manager
|
b09551874bf5c5aaeda9c21669a90162bf3f0ca9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Manage access to secrets in an application."""
class KebabCaseSecretNameWarning(UserWarning):
pass
| 18.857143
| 49
| 0.69697
|
c969e1c1bafe71f6d559aac9a449dbdd2dadac9a
| 534
|
py
|
Python
|
aviata/users/admin.py
|
reyuan8/aviata
|
840e2d16a8775e0c80a38bf7e59b31c5bb8b5c31
|
[
"MIT"
] | null | null | null |
aviata/users/admin.py
|
reyuan8/aviata
|
840e2d16a8775e0c80a38bf7e59b31c5bb8b5c31
|
[
"MIT"
] | null | null | null |
aviata/users/admin.py
|
reyuan8/aviata
|
840e2d16a8775e0c80a38bf7e59b31c5bb8b5c31
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from aviata.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + tuple(
auth_admin.UserAdmin.fieldsets
)
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 26.7
| 63
| 0.726592
|
8b17afe341b64bc5932118b625289cd6bbf7a941
| 47
|
py
|
Python
|
scratch/current_test.py
|
MaxWhitney/aft
|
bf9981237438d70c2b8e517e1f4cde75b9eb32a4
|
[
"MIT"
] | 3
|
2019-05-20T23:01:42.000Z
|
2019-12-20T14:22:37.000Z
|
scratch/current_test.py
|
MaxWhitney/aft
|
bf9981237438d70c2b8e517e1f4cde75b9eb32a4
|
[
"MIT"
] | 2
|
2018-12-19T21:31:28.000Z
|
2018-12-19T21:45:56.000Z
|
scratch/current_test.py
|
anniecherk/python_type_fuzzing
|
1b115c8cc5e9196c1ad8e6a5a4d6ec0af0724fc6
|
[
"MIT"
] | 1
|
2019-05-15T21:54:12.000Z
|
2019-05-15T21:54:12.000Z
|
from totest import *
addOne([(False, False)])
| 11.75
| 24
| 0.680851
|
718f4d0b7dab3225129e9e9fc8bf8af032e9a5eb
| 7,944
|
py
|
Python
|
test/algorithms/test_grover_optimizer.py
|
mtreinish/qiskit-optimization
|
03a4710fd31b583d4c2f4b6c0af98922ea045d63
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/test_grover_optimizer.py
|
mtreinish/qiskit-optimization
|
03a4710fd31b583d4c2f4b6c0af98922ea045d63
|
[
"Apache-2.0"
] | null | null | null |
test/algorithms/test_grover_optimizer.py
|
mtreinish/qiskit-optimization
|
03a4710fd31b583d4c2f4b6c0af98922ea045d63
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Grover Optimizer."""
import unittest
from test import QiskitOptimizationTestCase
import numpy as np
from ddt import data, ddt
from docplex.mp.model import Model
from qiskit import Aer
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit_optimization.algorithms import (GroverOptimizer,
MinimumEigenOptimizer,
OptimizationResultStatus)
from qiskit_optimization.converters import (InequalityToEquality,
IntegerToBinary,
LinearEqualityToPenalty,
QuadraticProgramToQubo)
from qiskit_optimization.problems import QuadraticProgram
@ddt
class TestGroverOptimizer(QiskitOptimizationTestCase):
"""GroverOptimizer tests."""
def setUp(self):
super().setUp()
algorithm_globals.random_seed = 1
self.sv_simulator = QuantumInstance(Aer.get_backend('statevector_simulator'),
seed_simulator=921, seed_transpiler=200)
self.qasm_simulator = QuantumInstance(Aer.get_backend('qasm_simulator'),
seed_simulator=123, seed_transpiler=123)
def validate_results(self, problem, results):
"""Validate the results object returned by GroverOptimizer."""
# Get expected value.
solver = MinimumEigenOptimizer(NumPyMinimumEigensolver())
comp_result = solver.solve(problem)
# Validate results.
np.testing.assert_array_almost_equal(comp_result.x, results.x)
self.assertEqual(comp_result.fval, results.fval)
self.assertAlmostEqual(results.fval, results.intermediate_fval)
def test_qubo_gas_int_zero(self):
"""Test for when the answer is zero."""
# Input.
model = Model()
x_0 = model.binary_var(name='x0')
x_1 = model.binary_var(name='x1')
model.minimize(0*x_0+0*x_1)
op = QuadraticProgram()
op.from_docplex(model)
# Will not find a negative, should return 0.
gmf = GroverOptimizer(1, num_iterations=1, quantum_instance=self.sv_simulator)
results = gmf.solve(op)
np.testing.assert_array_almost_equal(results.x, [0, 0])
self.assertEqual(results.fval, 0.0)
self.assertAlmostEqual(results.fval, results.intermediate_fval)
def test_qubo_gas_int_simple(self):
"""Test for simple case, with 2 linear coeffs and no quadratic coeffs or constants."""
# Input.
model = Model()
x_0 = model.binary_var(name='x0')
x_1 = model.binary_var(name='x1')
model.minimize(-x_0+2*x_1)
op = QuadraticProgram()
op.from_docplex(model)
# Get the optimum key and value.
n_iter = 8
gmf = GroverOptimizer(4, num_iterations=n_iter, quantum_instance=self.sv_simulator)
results = gmf.solve(op)
self.validate_results(op, results)
self.assertIsNotNone(results.operation_counts)
self.assertEqual(results.n_input_qubits, 2)
self.assertEqual(results.n_output_qubits, 4)
def test_qubo_gas_int_simple_maximize(self):
"""Test for simple case, but with maximization."""
# Input.
model = Model()
x_0 = model.binary_var(name='x0')
x_1 = model.binary_var(name='x1')
model.maximize(-x_0+2*x_1)
op = QuadraticProgram()
op.from_docplex(model)
# Get the optimum key and value.
n_iter = 8
gmf = GroverOptimizer(4, num_iterations=n_iter, quantum_instance=self.sv_simulator)
results = gmf.solve(op)
self.validate_results(op, results)
@data('sv', 'qasm')
def test_qubo_gas_int_paper_example(self, simulator):
"""
Test the example from https://arxiv.org/abs/1912.04088 using the state vector simulator
and the qasm simulator
"""
# Input.
model = Model()
x_0 = model.binary_var(name='x0')
x_1 = model.binary_var(name='x1')
x_2 = model.binary_var(name='x2')
model.minimize(-x_0+2*x_1-3*x_2-2*x_0*x_2-1*x_1*x_2)
op = QuadraticProgram()
op.from_docplex(model)
# Get the optimum key and value.
n_iter = 10
q_instance = self.sv_simulator if simulator == 'sv' else self.qasm_simulator
gmf = GroverOptimizer(6, num_iterations=n_iter, quantum_instance=q_instance)
results = gmf.solve(op)
self.validate_results(op, results)
def test_converter_list(self):
"""Test converters list"""
# Input.
model = Model()
x_0 = model.binary_var(name='x0')
x_1 = model.binary_var(name='x1')
model.maximize(-x_0+2*x_1)
op = QuadraticProgram()
op.from_docplex(model)
# Get the optimum key and value.
n_iter = 8
# a single converter.
qp2qubo = QuadraticProgramToQubo()
gmf = GroverOptimizer(4, num_iterations=n_iter, quantum_instance=self.sv_simulator,
converters=qp2qubo)
results = gmf.solve(op)
self.validate_results(op, results)
# a list of converters
ineq2eq = InequalityToEquality()
int2bin = IntegerToBinary()
penalize = LinearEqualityToPenalty()
converters = [ineq2eq, int2bin, penalize]
gmf = GroverOptimizer(4, num_iterations=n_iter, quantum_instance=self.sv_simulator,
converters=converters)
results = gmf.solve(op)
self.validate_results(op, results)
# invalid converters
with self.assertRaises(TypeError):
invalid = [qp2qubo, "invalid converter"]
GroverOptimizer(4, num_iterations=n_iter,
quantum_instance=self.sv_simulator,
converters=invalid)
def test_samples_and_raw_samples(self):
"""Test samples and raw_samples"""
op = QuadraticProgram()
op.integer_var(0, 3, 'x')
op.binary_var('y')
op.minimize(linear={'x': 1, 'y': 2})
op.linear_constraint(linear={'x': 1, 'y': 1}, sense='>=', rhs=1, name='xy')
opt_sol = 1
success = OptimizationResultStatus.SUCCESS
algorithm_globals.random_seed = 1
grover_optimizer = GroverOptimizer(
5, num_iterations=2, quantum_instance=self.qasm_simulator)
result = grover_optimizer.solve(op)
self.assertEqual(len(result.samples), 8)
self.assertEqual(len(result.raw_samples), 32)
self.assertAlmostEqual(sum(s.probability for s in result.samples), 1)
self.assertAlmostEqual(sum(s.probability for s in result.raw_samples), 1)
self.assertAlmostEqual(min(s.fval for s in result.samples), 0)
self.assertAlmostEqual(min(s.fval for s in result.samples if s.status == success), opt_sol)
self.assertAlmostEqual(min(s.fval for s in result.raw_samples), opt_sol)
for sample in result.raw_samples:
self.assertEqual(sample.status, success)
np.testing.assert_array_almost_equal(result.x, result.samples[0].x)
self.assertAlmostEqual(result.fval, result.samples[0].fval)
self.assertEqual(result.status, result.samples[0].status)
if __name__ == '__main__':
unittest.main()
| 39.72
| 99
| 0.640232
|
4e3daaa3c46f54218eb861697e0d7a5dc528785d
| 8,678
|
py
|
Python
|
paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
|
hysunflower/PaddleSpeech
|
bf393573a4f738b163c672e075999b97f445a39a
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
|
hysunflower/PaddleSpeech
|
bf393573a4f738b163c672e075999b97f445a39a
|
[
"Apache-2.0"
] | null | null | null |
paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
|
hysunflower/PaddleSpeech
|
bf393573a4f738b163c672e075999b97f445a39a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
import paddle
from paddle import distributed as dist
from paddle.io import DataLoader
from paddle.nn import Layer
from paddle.optimizer import Optimizer
from paddle.optimizer.lr import LRScheduler
from timer import timer
from paddlespeech.t2s.training.extensions.evaluator import StandardEvaluator
from paddlespeech.t2s.training.reporter import report
from paddlespeech.t2s.training.updaters.standard_updater import StandardUpdater
from paddlespeech.t2s.training.updaters.standard_updater import UpdaterState
logging.basicConfig(
format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s',
datefmt='[%Y-%m-%d %H:%M:%S]')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class PWGUpdater(StandardUpdater):
def __init__(self,
models: Dict[str, Layer],
optimizers: Dict[str, Optimizer],
criterions: Dict[str, Layer],
schedulers: Dict[str, LRScheduler],
dataloader: DataLoader,
discriminator_train_start_steps: int,
lambda_adv: float,
output_dir=None):
self.models = models
self.generator: Layer = models['generator']
self.discriminator: Layer = models['discriminator']
self.optimizers = optimizers
self.optimizer_g: Optimizer = optimizers['generator']
self.optimizer_d: Optimizer = optimizers['discriminator']
self.criterions = criterions
self.criterion_stft = criterions['stft']
self.criterion_mse = criterions['mse']
self.schedulers = schedulers
self.scheduler_g = schedulers['generator']
self.scheduler_d = schedulers['discriminator']
self.dataloader = dataloader
self.discriminator_train_start_steps = discriminator_train_start_steps
self.lambda_adv = lambda_adv
self.state = UpdaterState(iteration=0, epoch=0)
self.train_iterator = iter(self.dataloader)
log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
self.filehandler = logging.FileHandler(str(log_file))
logger.addHandler(self.filehandler)
self.logger = logger
self.msg = ""
def update_core(self, batch):
self.msg = "Rank: {}, ".format(dist.get_rank())
losses_dict = {}
# parse batch
wav, mel = batch
# Generator
noise = paddle.randn(wav.shape)
with timer() as t:
wav_ = self.generator(noise, mel)
# logging.debug(f"Generator takes {t.elapse}s.")
# initialize
gen_loss = 0.0
## Multi-resolution stft loss
with timer() as t:
sc_loss, mag_loss = self.criterion_stft(wav_, wav)
# logging.debug(f"Multi-resolution STFT loss takes {t.elapse}s.")
report("train/spectral_convergence_loss", float(sc_loss))
report("train/log_stft_magnitude_loss", float(mag_loss))
losses_dict["spectral_convergence_loss"] = float(sc_loss)
losses_dict["log_stft_magnitude_loss"] = float(mag_loss)
gen_loss += sc_loss + mag_loss
## Adversarial loss
if self.state.iteration > self.discriminator_train_start_steps:
with timer() as t:
p_ = self.discriminator(wav_)
adv_loss = self.criterion_mse(p_, paddle.ones_like(p_))
# logging.debug(
# f"Discriminator and adversarial loss takes {t.elapse}s")
report("train/adversarial_loss", float(adv_loss))
losses_dict["adversarial_loss"] = float(adv_loss)
gen_loss += self.lambda_adv * adv_loss
report("train/generator_loss", float(gen_loss))
losses_dict["generator_loss"] = float(gen_loss)
with timer() as t:
self.optimizer_g.clear_grad()
gen_loss.backward()
# logging.debug(f"Backward takes {t.elapse}s.")
with timer() as t:
self.optimizer_g.step()
self.scheduler_g.step()
# logging.debug(f"Update takes {t.elapse}s.")
        # Discriminator
if self.state.iteration > self.discriminator_train_start_steps:
with paddle.no_grad():
wav_ = self.generator(noise, mel)
p = self.discriminator(wav)
p_ = self.discriminator(wav_.detach())
real_loss = self.criterion_mse(p, paddle.ones_like(p))
fake_loss = self.criterion_mse(p_, paddle.zeros_like(p_))
dis_loss = real_loss + fake_loss
report("train/real_loss", float(real_loss))
report("train/fake_loss", float(fake_loss))
report("train/discriminator_loss", float(dis_loss))
losses_dict["real_loss"] = float(real_loss)
losses_dict["fake_loss"] = float(fake_loss)
losses_dict["discriminator_loss"] = float(dis_loss)
self.optimizer_d.clear_grad()
dis_loss.backward()
self.optimizer_d.step()
self.scheduler_d.step()
self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in losses_dict.items())
class PWGEvaluator(StandardEvaluator):
def __init__(self,
models,
criterions,
dataloader,
lambda_adv,
output_dir=None):
self.models = models
self.generator = models['generator']
self.discriminator = models['discriminator']
self.criterions = criterions
self.criterion_stft = criterions['stft']
self.criterion_mse = criterions['mse']
self.dataloader = dataloader
self.lambda_adv = lambda_adv
log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
self.filehandler = logging.FileHandler(str(log_file))
logger.addHandler(self.filehandler)
self.logger = logger
self.msg = ""
def evaluate_core(self, batch):
# logging.debug("Evaluate: ")
self.msg = "Evaluate: "
losses_dict = {}
wav, mel = batch
noise = paddle.randn(wav.shape)
with timer() as t:
wav_ = self.generator(noise, mel)
# logging.debug(f"Generator takes {t.elapse}s")
## Adversarial loss
with timer() as t:
p_ = self.discriminator(wav_)
adv_loss = self.criterion_mse(p_, paddle.ones_like(p_))
# logging.debug(
# f"Discriminator and adversarial loss takes {t.elapse}s")
report("eval/adversarial_loss", float(adv_loss))
losses_dict["adversarial_loss"] = float(adv_loss)
gen_loss = self.lambda_adv * adv_loss
# stft loss
with timer() as t:
sc_loss, mag_loss = self.criterion_stft(wav_, wav)
# logging.debug(f"Multi-resolution STFT loss takes {t.elapse}s")
report("eval/spectral_convergence_loss", float(sc_loss))
report("eval/log_stft_magnitude_loss", float(mag_loss))
losses_dict["spectral_convergence_loss"] = float(sc_loss)
losses_dict["log_stft_magnitude_loss"] = float(mag_loss)
gen_loss += sc_loss + mag_loss
report("eval/generator_loss", float(gen_loss))
losses_dict["generator_loss"] = float(gen_loss)
        # Discriminator
p = self.discriminator(wav)
real_loss = self.criterion_mse(p, paddle.ones_like(p))
fake_loss = self.criterion_mse(p_, paddle.zeros_like(p_))
dis_loss = real_loss + fake_loss
report("eval/real_loss", float(real_loss))
report("eval/fake_loss", float(fake_loss))
report("eval/discriminator_loss", float(dis_loss))
losses_dict["real_loss"] = float(real_loss)
losses_dict["fake_loss"] = float(fake_loss)
losses_dict["discriminator_loss"] = float(dis_loss)
self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in losses_dict.items())
self.logger.info(self.msg)
| 37.405172
| 79
| 0.629523
|
2cf4c8de957b722bcc1c47ac05df1f0788544bfd
| 15,154
|
py
|
Python
|
hio-yocto-bsp/sources/poky/scripts/lib/mic/imager/direct.py
|
qiangzai00001/hio-prj
|
060ff97fe21093b1369db78109d5b730b2b181c8
|
[
"MIT"
] | null | null | null |
hio-yocto-bsp/sources/poky/scripts/lib/mic/imager/direct.py
|
qiangzai00001/hio-prj
|
060ff97fe21093b1369db78109d5b730b2b181c8
|
[
"MIT"
] | null | null | null |
hio-yocto-bsp/sources/poky/scripts/lib/mic/imager/direct.py
|
qiangzai00001/hio-prj
|
060ff97fe21093b1369db78109d5b730b2b181c8
|
[
"MIT"
] | null | null | null |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# DESCRIPTION
# This implements the 'direct' image creator class for 'wic', based
# loosely on the raw image creator from 'mic'
#
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
import os
import stat
import shutil
from mic import kickstart, msger
from mic.utils import fs_related, runner, misc
from mic.utils.partitionedfs import PartitionedMount
from mic.utils.errors import CreatorError, MountError
from mic.imager.baseimager import BaseImageCreator
from mic.utils.oe.misc import *
from mic.plugin import pluginmgr
disk_methods = {
"do_install_disk":None,
}
class DirectImageCreator(BaseImageCreator):
"""
Installs a system into a file containing a partitioned disk image.
DirectImageCreator is an advanced ImageCreator subclass; an image
file is formatted with a partition table, each partition created
from a rootfs or other OpenEmbedded build artifact and dd'ed into
the virtual disk. The disk image can subsequently be dd'ed onto
media and used on actual hardware.
"""
def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
kernel_dir, native_sysroot, hdddir, staging_data_dir,
creatoropts=None, pkgmgr=None, compress_image=None,
generate_bmap=None, fstab_entry="uuid"):
"""
Initialize a DirectImageCreator instance.
This method takes the same arguments as ImageCreator.__init__()
"""
BaseImageCreator.__init__(self, creatoropts, pkgmgr)
self.__instimage = None
self.__imgdir = None
self.__disks = {}
self.__disk_format = "direct"
self._disk_names = []
self._ptable_format = self.ks.handler.bootloader.ptable
self.use_uuid = fstab_entry == "uuid"
self.compress_image = compress_image
self.bmap_needed = generate_bmap
self.oe_builddir = oe_builddir
if image_output_dir:
self.tmpdir = image_output_dir
self.cachedir = "%s/cache" % image_output_dir
self.rootfs_dir = rootfs_dir
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.hdddir = hdddir
self.staging_data_dir = staging_data_dir
def __write_fstab(self, image_rootfs):
"""overriden to generate fstab (temporarily) in rootfs. This
is called from mount_instroot, make sure it doesn't get called
from BaseImage.mount()"""
if image_rootfs is None:
return None
fstab = image_rootfs + "/etc/fstab"
if not os.path.isfile(fstab):
return None
parts = self._get_parts()
self._save_fstab(fstab)
fstab_lines = self._get_fstab(fstab, parts)
self._update_fstab(fstab_lines, parts)
self._write_fstab(fstab, fstab_lines)
return fstab
def _update_fstab(self, fstab_lines, parts):
"""Assume partition order same as in wks"""
for num, p in enumerate(parts, 1):
if not p.mountpoint or p.mountpoint == "/" or p.mountpoint == "/boot":
continue
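            # On an msdos partition table the fourth slot holds the extended
            # partition, so logical partitions are numbered from 5 onwards;
            # hence the +1 below (assumed rationale).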
if self._ptable_format == 'msdos' and num > 3:
device_name = "/dev/" + p.disk + str(num + 1)
else:
device_name = "/dev/" + p.disk + str(num)
fstab_entry = device_name + "\t" + p.mountpoint + "\t" + p.fstype + "\tdefaults\t0\t0\n"
fstab_lines.append(fstab_entry)
def _write_fstab(self, fstab, fstab_lines):
fstab = open(fstab, "w")
for line in fstab_lines:
fstab.write(line)
fstab.close()
def _save_fstab(self, fstab):
"""Save the current fstab in rootfs"""
shutil.copyfile(fstab, fstab + ".orig")
def _restore_fstab(self, fstab):
"""Restore the saved fstab in rootfs"""
if fstab is None:
return
shutil.move(fstab + ".orig", fstab)
def _get_fstab(self, fstab, parts):
"""Return the desired contents of /etc/fstab."""
f = open(fstab, "r")
fstab_contents = f.readlines()
f.close()
return fstab_contents
def set_bootimg_dir(self, bootimg_dir):
"""
Accessor for bootimg_dir, the actual location used for the source
of the bootimg. Should be set by source plugins (only if they
change the default bootimg source) so the correct info gets
displayed for print_outimage_info().
"""
self.bootimg_dir = bootimg_dir
def _get_parts(self):
if not self.ks:
raise CreatorError("Failed to get partition info, "
"please check your kickstart setting.")
# Set a default partition if no partition is given out
if not self.ks.handler.partition.partitions:
partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
args = partstr.split()
pd = self.ks.handler.partition.parse(args[1:])
if pd not in self.ks.handler.partition.partitions:
self.ks.handler.partition.partitions.append(pd)
# partitions list from kickstart file
return kickstart.get_partitions(self.ks)
def get_disk_names(self):
""" Returns a list of physical target disk names (e.g., 'sdb') which
will be created. """
if self._disk_names:
return self._disk_names
#get partition info from ks handler
parts = self._get_parts()
for i in range(len(parts)):
if parts[i].disk:
disk_name = parts[i].disk
else:
raise CreatorError("Failed to create disks, no --ondisk "
"specified in partition line of ks file")
if parts[i].mountpoint and not parts[i].fstype:
raise CreatorError("Failed to create disks, no --fstype "
"specified for partition with mountpoint "
"'%s' in the ks file")
self._disk_names.append(disk_name)
return self._disk_names
def _full_name(self, name, extention):
""" Construct full file name for a file we generate. """
return "%s-%s.%s" % (self.name, name, extention)
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
return os.path.join(path, self._full_name(name, extention))
def get_default_source_plugin(self):
"""
The default source plugin i.e. the plugin that's consulted for
overall image generation tasks outside of any particular
partition. For convenience, we just hang it off the
bootloader handler since it's the one non-partition object in
any setup. By default the default plugin is set to the same
plugin as the /boot partition; since we hang it off the
bootloader object, the default can be explicitly set using the
--source bootloader param.
"""
return self.ks.handler.bootloader.source
#
    # Actual implementation
#
def _mount_instroot(self, base_on = None):
"""
For 'wic', we already have our build artifacts and don't want
to loop mount anything to install into, we just create
filesystems from the artifacts directly and combine them into
a partitioned image.
We still want to reuse as much of the basic mic machinery
though; despite the fact that we don't actually do loop or any
other kind of mounting we still want to do many of the same
things to prepare images, so we basically just adapt to the
basic framework and reinterpret what 'mounting' means in our
context.
_instroot would normally be something like
/var/tmp/wic/build/imgcreate-s_9AKQ/install_root, for
installing packages, etc. We don't currently need to do that,
so we simplify life by just using /var/tmp/wic/build as our
workdir.
"""
parts = self._get_parts()
self.__instimage = PartitionedMount(self._instroot)
for p in parts:
# as a convenience, set source to the boot partition source
# instead of forcing it to be set via bootloader --source
if not self.ks.handler.bootloader.source and p.mountpoint == "/boot":
self.ks.handler.bootloader.source = p.source
for p in parts:
# need to create the filesystems in order to get their
# sizes before we can add them and do the layout.
# PartitionedMount.mount() actually calls __format_disks()
# to create the disk images and carve out the partitions,
# then self.install() calls PartitionedMount.install()
            # which calls __install_partition() for each partition
# to dd the fs into the partitions. It would be nice to
# be able to use e.g. ExtDiskMount etc to create the
# filesystems, since that's where existing e.g. mkfs code
# is, but those are only created after __format_disks()
# which needs the partition sizes so needs them created
# before its called. Well, the existing setup is geared
# to installing packages into mounted filesystems - maybe
# when/if we need to actually do package selection we
# should modify things to use those objects, but for now
# we can avoid that.
p.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir,
self.bootimg_dir, self.kernel_dir, self.native_sysroot)
fstab = self.__write_fstab(p.get_rootfs())
self._restore_fstab(fstab)
self.__instimage.add_partition(int(p.size),
p.disk,
p.mountpoint,
p.source_file,
p.fstype,
p.label,
fsopts = p.fsopts,
boot = p.active,
align = p.align,
part_type = p.part_type)
self.__instimage.layout_partitions(self._ptable_format)
self.__imgdir = self.workdir
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msger.debug("Adding disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
disk_obj = fs_related.DiskImage(full_path, disk['min_size'])
self.__disks[disk_name] = disk_obj
self.__instimage.add_disk(disk_name, disk_obj)
self.__instimage.mount()
def install(self, repo_urls=None):
"""
Install fs images into partitions
"""
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msger.debug("Installing disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
self.__instimage.install(full_path)
def configure(self, repodata = None):
"""
Configure the system image according to kickstart.
For now, it just prepares the image to be bootable by e.g.
creating and installing a bootloader configuration.
"""
source_plugin = self.get_default_source_plugin()
if source_plugin:
self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods)
for disk_name, disk in self.__instimage.disks.items():
self._source_methods["do_install_disk"](disk, disk_name, self,
self.workdir,
self.oe_builddir,
self.bootimg_dir,
self.kernel_dir,
self.native_sysroot)
def print_outimage_info(self):
"""
Print the image(s) and artifacts used, for the user.
"""
msg = "The new image(s) can be found here:\n"
parts = self._get_parts()
for disk_name, disk in self.__instimage.disks.items():
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msg += ' %s\n\n' % full_path
msg += 'The following build artifacts were used to create the image(s):\n'
for p in parts:
if p.get_rootfs() is None:
continue
if p.mountpoint == '/':
str = ':'
else:
str = '["%s"]:' % p.label
msg += ' ROOTFS_DIR%s%s\n' % (str.ljust(20), p.get_rootfs())
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
msger.info(msg)
def _get_boot_config(self):
"""
Return the rootdev/root_part_uuid (if specified by
--part-type)
Assume partition order same as in wks
"""
rootdev = None
root_part_uuid = None
parts = self._get_parts()
for num, p in enumerate(parts, 1):
if p.mountpoint == "/":
part = ''
if p.disk.startswith('mmcblk'):
part = 'p'
if self._ptable_format == 'msdos' and num > 3:
rootdev = "/dev/%s%s%-d" % (p.disk, part, num + 1)
else:
rootdev = "/dev/%s%s%-d" % (p.disk, part, num)
root_part_uuid = p.part_type
return (rootdev, root_part_uuid)
def _unmount_instroot(self):
        if self.__instimage is not None:
try:
self.__instimage.cleanup()
except MountError, err:
msger.warning("%s" % err)
| 39.774278
| 100
| 0.590735
|
15dafa12ff05008e13373f810feaeaf57b4050e0
| 1,252
|
py
|
Python
|
forge_sdk/rpc/forge_rpc/file.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 9
|
2019-05-08T01:30:22.000Z
|
2020-05-08T22:11:40.000Z
|
forge_sdk/rpc/forge_rpc/file.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 22
|
2019-05-14T18:36:17.000Z
|
2019-12-24T10:09:42.000Z
|
forge_sdk/rpc/forge_rpc/file.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | null | null | null |
from forge_sdk.protos import protos
from forge_sdk.rpc import lib
class ForgeFileRpc:
def __init__(self, channel):
self.stub = protos.FileRpcStub(channel)
def store_file(self, chunk):
"""GRPC call to store file
Args:
chunk(bytes or list[bytes]): file bytes to store
Returns:
ResponseStoreFile
"""
def to_req(item):
return protos.RequestStoreFile(chunk=item)
requests = lib.to_iter(to_req, chunk)
return self.stub.store_file(requests)
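    # Illustrative usage (a sketch only; it assumes an already-open gRPC channel and
    # that the response messages expose 'hash' and 'chunk' fields, as the docstrings
    # in this class suggest):
    #   rpc = ForgeFileRpc(channel)
    #   stored = rpc.store_file(open("report.pdf", "rb").read())
    #   data = b"".join(part.chunk for part in rpc.load_file(stored.hash))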
def load_file(self, file_hash):
"""GRPC call to load stored file
Args:
file_hash(string): hash of stored file
Returns:
ResponseLoadFile(stream)
"""
req_kwargs = {
'hash': file_hash,
}
request = protos.RequestLoadFile(**req_kwargs)
return self.stub.load_file(request)
def pin_file(self, file_hash):
"""GRPC call to pin file so Forge will keep the file
Args:
file_hash(string): hash of the file to pin
Returns:
ResponsePinFile
"""
request = protos.RequestPinFile(hash=file_hash)
return self.stub.pin_file(request)
| 22.357143
| 60
| 0.590256
|
7cb4abc0f63965be5a08d0370f4f2224b74e90c4
| 6,456
|
py
|
Python
|
pywikibot/xmlreader.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
pywikibot/xmlreader.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
pywikibot/xmlreader.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
XML reading module.
Each XmlEntry object represents a page, as read from an XML source
The XmlDump class reads a pages_current XML dump (like the ones offered on
https://dumps.wikimedia.org/backup-index.html) and offers a generator over
XmlEntry objects which can be used by other bots.
"""
#
# (C) Pywikibot team, 2005-2013
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 86185cf75e74b4f72e076c4b62bd9dbdd029f76e $'
#
import threading
import re
from xml.etree.cElementTree import iterparse
import xml.sax
from pywikibot.tools import open_compressed
def parseRestrictions(restrictions):
"""
Parse the characters within a restrictions tag.
Returns strings representing user groups allowed to edit and
to move a page, where None means there are no restrictions.
"""
if not restrictions:
return None, None
editRestriction = None
moveRestriction = None
editLockMatch = re.search('edit=([^:]*)', restrictions)
if editLockMatch:
editRestriction = editLockMatch.group(1)
moveLockMatch = re.search('move=([^:]*)', restrictions)
if moveLockMatch:
moveRestriction = moveLockMatch.group(1)
if restrictions == 'sysop':
editRestriction = 'sysop'
moveRestriction = 'sysop'
return editRestriction, moveRestriction
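# For example, parseRestrictions('edit=sysop:move=autoconfirmed') returns
# ('sysop', 'autoconfirmed'), while an empty restrictions string returns (None, None).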
class XmlEntry:
"""Represent a page."""
def __init__(self, title, ns, id, text, username, ipedit, timestamp,
editRestriction, moveRestriction, revisionid, comment,
redirect):
"""Constructor."""
# TODO: there are more tags we can read.
self.title = title
self.ns = ns
self.id = id
self.text = text
self.username = username.strip()
self.ipedit = ipedit
self.timestamp = timestamp
self.editRestriction = editRestriction
self.moveRestriction = moveRestriction
self.revisionid = revisionid
self.comment = comment
self.isredirect = redirect
class XmlParserThread(threading.Thread):
"""
XML parser that will run as a single thread.
This allows the XmlDump
generator to yield pages before the parser has finished reading the
entire dump.
There surely are more elegant ways to do this.
"""
def __init__(self, filename, handler):
"""Constructor."""
threading.Thread.__init__(self)
self.filename = filename
self.handler = handler
def run(self):
"""Parse the file in a single thread."""
xml.sax.parse(self.filename, self.handler)
class XmlDump(object):
"""
Represents an XML dump file.
Reads the local file at initialization,
parses it, and offers access to the resulting XmlEntries via a generator.
@param allrevisions: boolean
If True, parse all revisions instead of only the latest one.
Default: False.
"""
def __init__(self, filename, allrevisions=False):
"""Constructor."""
self.filename = filename
if allrevisions:
self._parse = self._parse_all
else:
self._parse = self._parse_only_latest
def parse(self):
"""Generator using cElementTree iterparse function."""
with open_compressed(self.filename) as source:
# iterparse's event must be a str but they are unicode with
# unicode_literals in Python 2
context = iterparse(source, events=(str('start'), str('end'),
str('start-ns')))
self.root = None
for event, elem in context:
if event == "start-ns" and elem[0] == "":
self.uri = elem[1]
continue
if event == "start" and self.root is None:
self.root = elem
continue
for rev in self._parse(event, elem):
yield rev
def _parse_only_latest(self, event, elem):
"""Parser that yields only the latest revision."""
if event == "end" and elem.tag == "{%s}page" % self.uri:
self._headers(elem)
revision = elem.find("{%s}revision" % self.uri)
yield self._create_revision(revision)
elem.clear()
self.root.clear()
def _parse_all(self, event, elem):
"""Parser that yields all revisions."""
if event == "start" and elem.tag == "{%s}page" % self.uri:
self._headers(elem)
if event == "end" and elem.tag == "{%s}revision" % self.uri:
yield self._create_revision(elem)
elem.clear()
self.root.clear()
def _headers(self, elem):
"""Extract headers from XML chunk."""
self.title = elem.findtext("{%s}title" % self.uri)
self.ns = elem.findtext("{%s}ns" % self.uri)
self.pageid = elem.findtext("{%s}id" % self.uri)
self.restrictions = elem.findtext("{%s}restrictions" % self.uri)
self.isredirect = elem.findtext("{%s}redirect" % self.uri) is not None
self.editRestriction, self.moveRestriction = parseRestrictions(
self.restrictions)
def _create_revision(self, revision):
"""Create a Single revision."""
revisionid = revision.findtext("{%s}id" % self.uri)
timestamp = revision.findtext("{%s}timestamp" % self.uri)
comment = revision.findtext("{%s}comment" % self.uri)
contributor = revision.find("{%s}contributor" % self.uri)
ipeditor = contributor.findtext("{%s}ip" % self.uri)
username = ipeditor or contributor.findtext("{%s}username" % self.uri)
# could get comment, minor as well
text = revision.findtext("{%s}text" % self.uri)
return XmlEntry(title=self.title,
ns=self.ns,
id=self.pageid,
text=text or u'',
username=username or u'', # username might be deleted
ipedit=bool(ipeditor),
timestamp=timestamp,
editRestriction=self.editRestriction,
moveRestriction=self.moveRestriction,
revisionid=revisionid,
comment=comment,
redirect=self.isredirect
)
| 34.15873
| 78
| 0.597119
|
bb01b4a03adf1ff54554e44dbf3eb92aae61ce8a
| 12,439
|
py
|
Python
|
tests/python/sparse_tensor.py
|
dajenet/MinkowskiEngine
|
0523264aa1054911404c9e58b59d4b7aeb43e840
|
[
"MIT"
] | null | null | null |
tests/python/sparse_tensor.py
|
dajenet/MinkowskiEngine
|
0523264aa1054911404c9e58b59d4b7aeb43e840
|
[
"MIT"
] | null | null | null |
tests/python/sparse_tensor.py
|
dajenet/MinkowskiEngine
|
0523264aa1054911404c9e58b59d4b7aeb43e840
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import numpy as np
import torch
from MinkowskiEngine import (
SparseTensor,
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
clear_global_coordinate_manager,
is_cuda_available,
)
from MinkowskiEngine.utils import batched_coordinates, sparse_quantize, sparse_collate
from tests.python.common import data_loader, load_file
class SparseTensorTestCase(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test SparseTensor")
coords, feats, labels = data_loader(nchannel=2)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_empty(self):
print(f"{self.__class__.__name__}: test_empty SparseTensor")
feats = torch.FloatTensor(0, 16)
coords = torch.IntTensor(0, 4)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_tensor_stride(self):
print(f"{self.__class__.__name__}: test_tensor_stride SparseTensor")
feats = torch.FloatTensor(4, 16)
coords = torch.IntTensor(
[[0, 4, 2, 1], [0, 4, 0, 0], [0, 4, 4, 4], [0, 4, 4, 7]]
)
print(coords)
input = SparseTensor(feats, coordinates=coords, tensor_stride=4)
self.assertEqual(input.tensor_stride, [4, 4, 4])
print(input)
def test_force_creation(self):
print(f"{self.__class__.__name__}: test_force_creation")
coords, feats, labels = data_loader(nchannel=2)
input1 = SparseTensor(feats, coordinates=coords)
input2 = SparseTensor(
feats, coordinates=coords, coordinate_manager=input1.coordinate_manager
)
print(input1.coordinate_map_key, input2.coordinate_map_key)
def test_device(self):
print(f"{self.__class__.__name__}: test_device SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device2(self):
print(f"{self.__class__.__name__}: test_device SparseTensor")
if not is_cuda_available():
return
coordinates = np.random.rand(8192,3) * 200
quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
print(bcoords, bfeats)
SparseTensor(bfeats, bcoords)
def test_quantization(self):
print(f"{self.__class__.__name__}: test_quantization")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
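        # i.e. duplicate coordinates are averaged: [0, 1] -> (0 + 1) / 2 = 0.5,
        # [0, 2] -> (2 + 3) / 2 = 2.5, [1, 0] -> (5 + 6) / 2 = 5.5 and [1, 1] -> 7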
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_quantization_gpu(self):
print(f"{self.__class__.__name__}: test_quantization_gpu")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
print(input)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
print(sinput)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_extraction(self):
print(f"{self.__class__.__name__}: test_extraction")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords)
C0 = X.coordinates_at(0)
F0 = X.features_at(0)
self.assertTrue(0 in C0)
self.assertTrue(1 in C0)
self.assertTrue(2 in C0)
self.assertTrue(1.1 in F0)
self.assertTrue(2.1 in F0)
self.assertTrue(3.1 in F0)
CC0, FC0 = X.coordinates_and_features_at(0)
self.assertTrue((C0 == CC0).all())
self.assertTrue((F0 == FC0).all())
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
if not is_cuda_available():
return
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords, device=0)
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
def test_features_at_coordinates(self):
print(f"{self.__class__.__name__}: test_features_at_coordinates")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(features=feats, coordinates=coords)
feats = X.features_at_coordinates(
torch.FloatTensor([[0, 0], [0, 1], [0, 2], [2, 2], [0, 0], [0, 0.5]])
).flatten()
self.assertTrue(feats[0] == 1.1)
self.assertTrue(feats[3] == 5.1)
self.assertTrue(feats[4] == 1.1)
def test_decomposition(self):
print(f"{self.__class__.__name__}: test_decomposition")
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats, bcoords)
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_decomposition_gpu(self):
print(f"{self.__class__.__name__}: test_decomposition_gpu")
if not torch.cuda.is_available():
return
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats.to(0), bcoords.to(0))
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_operation_mode(self):
print(f"{self.__class__.__name__}: test_operation_mode")
# Set to use the global sparse tensor coords manager by default
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
)
coords, feats, labels = data_loader(nchannel=2)
# Create a sparse tensor on two different coordinates.
A = SparseTensor(torch.rand(feats.shape), coordinates=coords)
B = SparseTensor(
torch.rand(4, 2),
coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
)
self.assertTrue(A.coordinate_manager == B.coordinate_manager)
A.requires_grad_(True)
B.requires_grad_(True)
C = A + B
C.F.sum().backward()
self.assertTrue(torch.all(A.F.grad == 1).item())
self.assertTrue(torch.all(B.F.grad == 1).item())
C = A - B
C = A * B
C = A / B
# Inplace
A.requires_grad_(False)
D = SparseTensor(
torch.rand(feats.shape),
coordinate_map_key=A.coordinate_map_key,
coordinate_manager=A.coordinate_manager,
)
A -= D
A *= D
A /= D
clear_global_coordinate_manager()
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
)
| 38.871875
| 86
| 0.610178
|
141849683b4610c16987dcfbe315968aadfd57e3
| 1,346
|
py
|
Python
|
secret_santa.py
|
ReaLgressA/SecretSantaScript
|
727a4087415febdbc5c3111b10760a1fc823a00c
|
[
"MIT"
] | null | null | null |
secret_santa.py
|
ReaLgressA/SecretSantaScript
|
727a4087415febdbc5c3111b10760a1fc823a00c
|
[
"MIT"
] | null | null | null |
secret_santa.py
|
ReaLgressA/SecretSantaScript
|
727a4087415febdbc5c3111b10760a1fc823a00c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Instructions:
# Create input.txt with the names of the participants, one per line (use UTF-8 encoding, please).
# Edit the message template for your own purposes.
# Run the script and send everyone their personal message generated in the "Secret Santa" folder.
# Merry Christmas, folks!
import os
import random
resultDirName = 'Secret Santa'
#msgTemplate = "Your custom message template. First argument is the presenter name. Second one - doney's name."
msgTemplate = "С наступающим тебя, %s!\n%s с нетерпением ждёт твоего незабываемого подарка, постарайся сделать его особенным :)\nУвидимся в эту пятницу в 11:30 в холле ИКСа. Там ты сможешь подарить свой подарок и встретить своего Тайного Санту.\nС уважением,\nНовогодний креатив от АС-132"
presenters = open("input.txt").read().splitlines()
doneys = list(presenters)
pairs = {}
while len(presenters) > 0:
doneyIdx = random.randint(0, len(doneys) - 1)
while presenters[0] == doneys[doneyIdx]:
doneyIdx = random.randint(0, len(doneys) - 1)
pairs[presenters[0]] = doneys[doneyIdx]
del presenters[0]
del doneys[doneyIdx]
if not os.path.exists(resultDirName):
os.makedirs(resultDirName)
for pair in pairs:
file = open("%s/%s.txt" % (resultDirName, pairs[pair].decode('utf-8')), "w")
file.write(msgTemplate % (pairs[pair], pair))
| 49.851852
| 289
| 0.72734
|
f91c31973790b1fe1c7df2e6f4b8f85ec633cc35
| 22,708
|
py
|
Python
|
qwe/planning/Planner.py
|
IEEERobotics/high-level
|
a50f2170ca81a16bd50b50f970f9e3fe9c656bfa
|
[
"BSD-2-Clause"
] | 1
|
2017-08-07T06:03:53.000Z
|
2017-08-07T06:03:53.000Z
|
qwe/planning/Planner.py
|
IEEERobotics/high-level
|
a50f2170ca81a16bd50b50f970f9e3fe9c656bfa
|
[
"BSD-2-Clause"
] | null | null | null |
qwe/planning/Planner.py
|
IEEERobotics/high-level
|
a50f2170ca81a16bd50b50f970f9e3fe9c656bfa
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Code for python planner
"""
import blockSim as BlockDet
import twoWayDict as twd
import navigation.nav as nav
from datetime import datetime
import comm.serial_interface as comm
import math
import time
import logging.config
pixelsToInches = 0.0195
class Planner:
nextSeaLandBlock = [] #list of the next available sea or land block to pick up
nextAirBlock = [] #list of the 2 air blocks in storage
armList = [] #list of which arm contains what
armID = [2, 0] # armID[0] = right arm, armID[1] = left arm
storageSim = []
seaBlockSim = {}
landBlockSim = {}
airBlockSim = []
nextSeaBlockLoc = []
nextLandBlockLoc = []
nextAirBlockLoc = []
scannedSeaLocs = {}
scannedLandLocs = {}
colors = []
#nsbl = twd.TwoWayDict()
#nlbl = twd.TwoWayDict()
def __init__(self, bot_loc, blobs, blocks, zones, waypoints, scPlanner, bot_state, qMove_nav, logger):
"""Setup navigation class
:param bot_loc: Shared dict updated with best-guess location of bot by localizer
:param blocks: list of blocks
:param zones:
        :param waypoints: Shared dict of named waypoints; elements 1-3 of each entry hold the position, heading and speed used when navigating to it
:param si: Serial interface object for sending commands to low-level boards
:param bot_state: Dict of information about the current state of the bot (ex macro/micro nav)
:param qMove_nav: Multiprocessing.Queue object for passing movement commands to navigation (mostly from Planner)
"""
# Store passed-in data
self.bot_loc = bot_loc
self.blobs = blobs
self.blocks = blocks
self.zones = zones
self.waypoints = waypoints
self.scPlanner = scPlanner
self.bot_state = bot_state
self.qMove_nav = qMove_nav
self.logger = logger
self.bot_state["cv_blockDetect"] = False
self.bot_state["cv_lineTrack"] = False
self.armID[0] = comm.right_arm
self.armID[1] = comm.left_arm
self.nextSeaLandBlock = ["St01","St02","St03","St04","St05","St06","St07","St08","St09","St10","St11","St12","St13","St14"]
self.nextAirBlock = []
self.nextSeaBlockLoc = ["Se01","Se02","Se03","Se04","Se05","Se06"]
self.nextLandBlockLoc = ["L01","L02","L03","L04","L05","L06"]
self.colors = ["red", "blue", "green", "orange", "brown", "yellow"]
        for i in range(len(self.nextSeaLandBlock)):
            self.zones[self.nextSeaLandBlock[i]] = 1
        for i in range(len(self.nextSeaBlockLoc)):
            self.zones[self.nextSeaBlockLoc[i]] = 0
        for i in range(len(self.nextLandBlockLoc)):
            self.zones[self.nextLandBlockLoc[i]] = 0
for i in range(len(self.colors)):
self.scannedSeaLocs[self.colors[i]] = "empty"
self.scannedLandLocs[self.colors[i]] = "empty"
self.bot_state["zone_change"] = 1
#get current location
def getCurrentLocation(self):
return self.bot_loc
def wait(self, blocking):
while blocking != False:
continue
def getBlobNearCenter(self):
closestBlob = None
mindist = 641 #some large number at least as large as width of image
direction = 1
mindir = direction
for blob in self.blobs: # a more pythonic way of iterating
direction = 1
x,y,w,h = blob.bbox
blockDist = (320 - (x + w / 2)) # TODO use SimpleBlob.center instead?
if blockDist < 0:
blockDist = blockDist * -1
direction = -1
if blockDist < mindist:
mindist = blockDist
closestBlob = blob
mindir = direction
mindist = pixelsToInches * mindist
return closestBlob, mindir, mindist
    #move to the next block - use this if the next block location is
    #handled by nav instead of the planner
def moveToNextBlock(self):
pass
#print "Moving to Next Block"
#self.moveTo(self.getCurrentLocation(), "nextblock loc")
#micro or macro???
#move from start to end
def moveToWayPoint(self, startLoc, endLoc):
#print "Moving from ", startLoc, " to ", endLoc, "--", self.waypoints[endLoc]
self.logger.info("Moving from "+ str(startLoc)+ " to "+str(endLoc)+ "--"+str(self.waypoints[endLoc]))
x, y = self.waypoints[endLoc][1]
theta = self.waypoints[endLoc][2]
speed = self.waypoints[endLoc][3]
self.bot_state["naving"] = True
macro_m = nav.macro_move(x, y, theta, datetime.now())
self.qMove_nav.put(macro_m)
#self.wait(self.bot_state["naving"])
while self.bot_state["naving"] != False:
continue
def microMove(self, distance, direction):
#print "Moving from ", startLoc, " to ", endLoc
micro_m = nav.micro_move_XY(distance, comm.default_speed * direction, datetime.now())
self.qMove_nav.put(micro_m)
def moveUpRamp(self, loc1, loc2):
self.logger.info("Moving up the ramp from "+ str(loc1)+ " to "+str(loc2))
x1, y1 = self.waypoints[loc1][1]
x2, y2 = self.waypoints[loc2][1]
distance = math.sqrt((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))
speed = comm.default_speed * 1.2
micro_m = nav.micro_move_XY(distance, speed, datetime.now())
self.qMove_nav.put(micro_m)
def alignWithCenter(self, loc):
pass
#this is supposed to align with the expected white line...
#one possible solution:
# a repeated forward and backward micromoves until we are aligned
#dist = 320-(x+w/2)
#direction = 1
#if dist < 0:
# direction = -1
#print "Distance to center: ", dist, "pixels -- ", dist*0.0192, "inches --", dist*0.0192*1622/9.89,"revolutions"
#self.micromove(dist, direction)
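        # A possible implementation sketch (assumes self.blobs is refreshed by the vision
        # process between iterations; the 0.25 inch tolerance is only an assumption):
        #   blob, direction, distance = self.getBlobNearCenter()
        #   while blob is not None and distance > 0.25:
        #       self.microMove(distance, direction)
        #       blob, direction, distance = self.getBlobNearCenter()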
def processSeaLand(self, startTime):
armCount = 0
self.armList = []
self.logger.info("+++++ +++++ Beginning to pick and place sea and land blocks +++++ +++++")
for i in range(len(self.nextSeaLandBlock)):
elapsedTime = datetime.now() - startTime
if elapsedTime.seconds > 250:
self.logger.debug("Don't you have a flight to catch?") #time to start processing air.
#things to do: if location of both airblocks are known, pick them up, else continue scanning
# if one of the arms has a block, use the other arm to pick up block, place other block down
# if both arms have blocks -- this is not good!
stID = self.nextSeaLandBlock[i];
#print "Processing: [", stID, self.waypoints[stID], "]"
self.logger.info("Processing: ["+ str(stID)+ str(self.waypoints[stID])+ "]")
#movement along the whiteline, close to the blocks
self.bot_state["cv_blockDetect"] = False
self.bot_state["cv_lineTrack"] = True
self.moveToWayPoint(self.getCurrentLocation(), stID)
#get block from vision, do not track lines
self.bot_state["cv_blockDetect"] = True
self.bot_state["cv_lineTrack"] = False
#self.wait(self.bot_state["cv_blockDetect"])
while self.bot_state["cv_blockDetect"] != False:
continue
if self.blobs == None:
continue
block, direction, distance = self.getBlobNearCenter()
#block = self.storageSim[i]
#print "Processing: [", block.getColor(), block.getSize(), block.getLocation(), "]"
#if the block is small, assign that to list of air blocks
#continue / move to next block
#if block.getSize() == "small":
if block.length == "small":
self.nextAirBlock.append([stID, block])
continue
#in order to pick up a block, first check if bot is centered
#self.alignWithCenter()
self.microMove(distance, direction)
#self.pickUpBlock(armCount) #arm 0 or 1.
self.scPlanner.armPick(self.armID[armCount])
self.zones[stID] = 0
self.bot_state["zone_change"] = self.bot_state["zone_change"] + 1
self.armList.append(block)
armCount = armCount + 1;
if armCount == 2:
self.logger.info("picked up 2 blocks")
#when dropping blocks off, offset the center of the bot
#about 0.5 from the center of the dropoff zone
#Both arms contain sea blocks
#if self.armList[0].getSize() == "medium" and self.armList[1].getSize() == "medium":
if self.armList[0].length == "medium" and self.armList[1].length == "medium":
self.moveToWayPoint(self.getCurrentLocation(), "sea")
self.goToNextSeaDropOff(self.armList[0])
#self.placeBlock(0)
self.scPlanner.armDrop(self.armID[0])
self.goToNextSeaDropOff(self.armList[1])
#self.placeBlock(1)
self.scPlanner.armDrop(self.armID[1])
#Both arms contain land blocks
#elif self.armList[0].getSize() == "large" and self.armList[1].getSize() == "large":
elif self.armList[0].length == "large" and self.armList[1].length == "large":
self.moveToWayPoint(self.getCurrentLocation(), "land")
self.goToNextLandDropOff(self.armList[0])
#self.placeBlock(0)
self.scPlanner.armDrop(self.armID[0])
self.goToNextLandDropOff(self.armList[1])
#self.placeBlock(1)
self.scPlanner.armDrop(self.armID[1])
#One arm contains sea block and other contains land block
#elif self.armList[0].getSize() == "medium" and self.armList[1].getSize() == "large":
elif self.armList[0].length == "medium" and self.armList[1].length == "large":
self.moveToWayPoint(self.getCurrentLocation(), "sea")
self.goToNextSeaDropOff(self.armList[0])
#self.placeBlock(0)
self.scPlanner.armDrop(self.armID[0])
self.moveToWayPoint(self.getCurrentLocation(), "land")
self.goToNextLandDropOff(self.armList[1])
#self.placeBlock(1)
self.scPlanner.armDrop(self.armID[1])
#One arm contains land block and other contains sea block
#elif self.armList[0].getSize() == "large" and self.armList[1].getSize() == "medium":
elif self.armList[0].length == "large" and self.armList[1].length == "medium":
# even if the orders are different, first go to sea then land
self.moveToWayPoint(self.getCurrentLocation(), "sea")
self.goToNextSeaDropOff(self.armList[1])
#self.placeBlock(1)
self.scPlanner.armDrop(self.armID[1])
self.moveToWayPoint(self.getCurrentLocation(), "land")
self.goToNextLandDropOff(self.armList[0])
#self.placeBlock(0)
self.scPlanner.armDrop(self.armID[0])
armCount = 0
self.armList = []
self.logger.info("===== ===== ===== ===== ===== ===== ===== ===== ===== =====")
self.moveToWayPoint(self.getCurrentLocation(), "storage")
#end if
#end for
#end processSeaLand
def processAir(self):
for i in range(len(self.nextAirBlock)):
block = self.nextAirBlock[i];
#print "Processing: [", block.getColor(), block.getSize(), block.getLocation(), "]"
#self.pickUpBlock(block.getLocation(), i);
stID = block[0]
self.bot_state["cv_blockDetect"] = False
self.bot_state["cv_lineTrack"] = True
self.moveToWayPoint(self.getCurrentLocation(), stID)
self.bot_state["cv_blockDetect"] = True
self.bot_state["cv_lineTrack"] = False
#self.wait(self.bot_state["cv_blockDetect"])
while self.bot_state["cv_blockDetect"] != False:
continue
if self.blobs == None:
continue
block, direction, distance = self.getBlobNearCenter()
#in order to pick up a block, first check if bot is centered
#self.alignWithCenter()
self.microMove(distance, direction)
            self.pickUpBlock(i)  #pickUpBlock() takes a single arm index; arm i holds this air block
#end for
self.logger.info("Move to Ramp, up the ramp, to the drop-off")
self.moveUpRamp("grnd2ramp","lwrPlt") #up the ramp
        self.moveToWayPoint(self.getCurrentLocation(), "lwrPlt") #align in the right direction on lower platform
self.moveUpRamp("lwrPlt", "air") #up the long ramp
#self.moveToWayPoint(self.getCurrentLocation(), "grnd2ramp") #normal speed
#self.moveToWayPoint(self.getCurrentLocation(), "lwrPlt") #ramp speed
#self.moveToWayPint(self.getCurrentLocation(), "uprRamp") #normal speed
#self.moveToWayPoint(self.getCurrentLocation(), "air") #ramp speed
self.logger.info("Drop Air Blocks")
self.logger.info("Placing First Air Block")
self.moveToWayPoint(self.getCurrentLocation(), "A01")
color, direction, distance = self.getAirDropOffColor() # TODO color may be "none" with invalid direction and distance - take care of that
if color is not None:
if color == self.armList[0].color:
self.placeBlock(0)
else:
self.placeBlock(1)
self.logger.info("Placing Second Air Block")
self.moveToWayPoint(self.getCurrentLocation(), "A02")
color, direction, distance = self.getAirDropOffColor()
if color is not None:
if color == self.armList[0].color:
self.placeBlock(0)
else:
self.placeBlock(1)
#self.goToNextAirDropOff(self.armList[1])
#self.placeBlock(1)
    def getAvailableSeaDropOffs(self):
        availList = []
        for i in range(len(self.nextSeaBlockLoc)):
            if self.zones[self.nextSeaBlockLoc[i]] == 0:
                availList.append(self.nextSeaBlockLoc[i])
        return availList
    def getAvailableLandDropOffs(self):
        availList = []
        for i in range(len(self.nextLandBlockLoc)):
            if self.zones[self.nextLandBlockLoc[i]] == 0:
                availList.append(self.nextLandBlockLoc[i])
        return availList
#go to the next sea dropoff zone
#separate function to handle sea specific movements
def goToNextSeaDropOff(self, block):
# if seaDropLocList is empty, go to Se01
# else, check if color of either block matches
availList = self.getAvailableSeaDropOffs()
#blockColor = block.getColor()
blockColor = block.color
#movement along white lines
self.bot_state["cv_blockDetect"] = False
self.bot_state["cv_lineTrack"] = True
if self.scannedSeaLocs[blockColor] == "empty":
#block location unknown
for i in range(len(availList)):
self.moveToWayPoint(self.getCurrentLocation(), availList[i])
#get the color at waypoint from vision
self.bot_state["cv_blockDetect"] = True
self.bot_state["cv_lineTrack"] = False
#self.wait(self.bot_state["cv_blockDetect"])
while self.bot_state["cv_blockDetect"] != False:
continue
if self.blobs == None:
continue
#color = seaBlockSim[availList[i]]
zone, direction, distance = self.getBlobNearCenter()
color = zone.color
if color == blockColor:
#found color
#align with center
                    #TODO: add a condition for when the distance is less than a particular value
                    #and micro-move based on which arm is being lowered.
self.zones[ availList[i]] = 1
self.bot_state["zone_change"] = self.bot_state["zone_change"] + 1
self.microMove(distance, direction)
break
else:
self.scannedSeaLocs[color] = availList[i]
#end if-else
#end for
else:
self.moveToWayPoint(self.getCurrentLocation(), self.scannedSeaLocs[blockColor])
self.zones[self.scannedSeaLocs[blockColor]] = 1
self.bot_state["zone_change"] = self.bot_state["zone_change"] + 1
#go to the next land dropoff zone
#separate function to handle land specific movements
def goToNextLandDropOff(self, block):
availList = self.getAvailableLandDropOffs()
#blockColor = block.getColor()
blockColor = block.color
#movement along white lines
self.bot_state["cv_blockDetect"] = False
self.bot_state["cv_lineTrack"] = True
if self.scannedLandLocs[blockColor] == "empty":
#block location unknown
for i in range(len(availList)):
self.moveToWayPoint(self.getCurrentLocation(), availList[i])
#get the color at waypoint from vision
self.bot_state["cv_blockDetect"] = True
self.bot_state["cv_lineTrack"] = False
#self.wait(self.bot_state["cv_blockDetect"])
while self.bot_state["cv_blockDetect"] != False:
continue
if self.blobs == None:
continue
#color = LandBlockSim[availList[i]]
zone, direction, distance = self.getBlobNearCenter()
color = zone.color
if color == blockColor:
#found color
#align with center
                    #TODO: add a condition for when the distance is less than a particular value
                    #and micro-move based on which arm is being lowered.
self.microMove(distance, direction)
self.zones[availList[i]] = 1
self.bot_state["zone_change"] = self.bot_state["zone_change"] + 1
break
else:
self.scannedLandLocs[color] = availList[i]
else:
self.moveToWayPoint(self.getCurrentLocation(), self.scannedLandLocs[blockColor])
self.zones[self.scannedLandLocs[blockColor]] = 1
self.bot_state["zone_change"] = self.bot_state["zone_change"] + 1
def getAirDropOffColor(self):
self.bot_state["cv_blockDetect"] = True
self.bot_state["cv_lineTrack"] = False
#self.wait(self.bot_state["cv_blockDetect"])
while self.bot_state["cv_blockDetect"] != False:
continue
if self.blobs == None:
            return None, 0, 0  # no blobs detected: neutral direction and zero distance
#color = LandBlockSim[availList[i]]
zone, direction, distance = self.getBlobNearCenter()
color = zone.color if zone is not None else "none"
return color, direction, distance
#pass
# pick up a block given armID
def pickUpBlock(self, arm):
armId = self.armID[arm]
#self.moveTo(self.getCurrentLocation(), blockLoc)
#print "Picking Up Block at ", blockLoc, "with Arm", armId
#call vision to make sure we are centered on the block
#if we are not centered, micromove
self.scPlanner.gripperOpen(armId)
self.scPlanner.armDown(armId)
self.scPlanner.gripperClose(armId)
self.scPlanner.armUp(armId)
# place a block given armID
def placeBlock(self, arm):
armId = self.armID[arm]
#self.moveTo(self.getCurrentLocation(), blockLoc)
#print "Placing block from ", self.armID[arm], "at", blockLoc
#call vision to make sure we are centered on the block
#if we are not centered, micromove
self.scPlanner.armDown(armId)
self.scPlanner.gripperOpen(armId)
self.scPlanner.armUp(armId)
self.scPlanner.gripperClose(armId)
#main
def start(self):
#self.storageSimulator("storage") ##use when vision is not available
#self.dropOffSimulator("sea") ##use when vision is not available
#self.dropOffSimulator("land")
self.logger.info("Move to Storage Start")
startTime = datetime.now()
self.moveToWayPoint(self.getCurrentLocation(), "storage")
#print "Scan Storage"
#self.scanStorageFirstTime("storage") # BUG: This should not be hardcoded. Currently fails.
#print "Move to Storage Start"
#self.moveToWayPoint(self.getCurrentLocation(), "storage")
print "***********************************************"
print "********** Processing - Sea and Land **********"
print "***********************************************"
self.processSeaLand(startTime)
print "**************************************"
print "********** Processing - Air **********"
print "**************************************"
self.processAir()
def test(self):
print self.waypoints
print len(self.waypoints)
print self.waypoints["storage"]
# Scan the given location for the first time
# use this if scanning first then dropping blocks
# def scanFirstTime(self, loc):
# print "Scanning Storage"
# print "Initiating Storage Scan"
# print "updating list of sea, land and air blocks"
#
# count = 0
# if loc == "storage":
# count = 14
# else:
# count = 6
#
# #Scan all blocks in area
# for i in range(count):
# #replace blocksim with block detector code
# bs = BlockDet.BlockSim()
# block = bs.process(loc,i)
# ## possibly update block location here
# ## block.setLocation(self.getCurrentLocation());
# print "scanning block", i+1, ":", block.getColor(), block.getSize(), block.getLocation()
#
# if loc == "storage":
# if block.getSize() == "small":
# self.nextAirBlock.append(block)
# else:
# self.nextSeaLandBlock.append(block)
# elif loc == "land":
# self.nextLandBlockLoc.append(block)
# elif loc == "sea":
# self.nextSeaBlockLoc.append(block)
# # Update target location for next block
#
# nextLoc = block.getLocation();
# bLoc = [int(nextLoc[0]), int(nextLoc[1])]
# bLoc[1] = bLoc[1] + 2; # change 2 to appropriate value
#
# self.moveTo(self.getCurrentLocation(), bLoc)
#end for
#print self.nextAirBlock
#print self.nextSeaLandBlock
#end scanFirstTime
# use this if dropping off blocks during scan
def storageSimulator(self, loc):
print "-- Using Block Detection Simulator"
#Scan all 14 blocks in storage area
for i in range(14):
bs = BlockDet.BlockSim()
block = bs.process(loc,i)
self.storageSim.append(block)
#end for
#end scanStorageFirstTime
# use this if dropping off blocks during scan
def dropOffSimulator(self, loc):
print "-- Using Zone Dection Simulator"
print "Initiating", loc, "scan"
for i in range(6):
print "scanning", loc, "block location", i+1
bs = BlockDet.BlockSim()
blockLoc = bs.process(loc,i)
if loc == "land":
self.landBlockSim[blockLoc.getLocation()] = blockLoc.getColor()
#self.nlbl[blockLoc.getColor()] = blockLoc.getLocation()
elif loc == "sea":
self.seaBlockSim[blockLoc.getLocation()] = blockLoc.getColor()
#self.nsbl[blockLoc.getColor()] = blockLoc.getLocation()
#end if
#end scanLandorSeaFirstTime
def run(bot_loc, blobs, blocks, zones, waypoints, scPlanner, bot_state, qMove_nav, logger=None):
if logger is None:
logging.config.fileConfig("logging.conf") # TODO This will break if not called from qwe. Add check to fix based on cwd?
logger = logging.getLogger(__name__)
logger.debug("Logger is set up")
logger.debug("Executing run function of Planner")
plan = Planner(bot_loc, blobs, blocks, zones, waypoints, scPlanner, bot_state, qMove_nav, logger)
#plan.test()
plan.start()
logger.debug("Completed Planner Execution")
if __name__ == "__main__":
plan = Planner() #will fail... needs waypoints from map.
plan.start()
| 36.863636
| 142
| 0.634094
|
e1e3d19a623a0da78ca47fe6597434d295a02447
| 946
|
py
|
Python
|
09. Data structure/Ranges.py
|
riyabhatia26/Python-Programming
|
2882728982c15c3b6380033eb2e90761b538dd93
|
[
"MIT"
] | 3
|
2020-08-07T04:33:19.000Z
|
2021-10-06T08:58:01.000Z
|
09. Data structure/Ranges.py
|
riyabhatia26/Python-Programming
|
2882728982c15c3b6380033eb2e90761b538dd93
|
[
"MIT"
] | null | null | null |
09. Data structure/Ranges.py
|
riyabhatia26/Python-Programming
|
2882728982c15c3b6380033eb2e90761b538dd93
|
[
"MIT"
] | 2
|
2021-10-06T08:58:05.000Z
|
2021-10-06T09:46:42.000Z
|
# Range: a sequence representing an arithmetic progression of integers
# By default it starts from 0
a = range(3)
print(a)
for i in a:
    print(i)
# Iterating over a range
for i in range(5):
print(i)
"""
Range signature
1. One argument: means argument stop value
range(stop)
2. Two arguments: means argument contains start and stop values
range(start,stop)
3. Three arguments: means argument contains start,stop and step values
range(start,stop,step)
* Range does not support keyword arguments
"""
# Iterating over a list
b =[3423,23423,465,786,8132,6578]
for i in b:
print(i)
# Enumerate
# Constructs an iterable of (index, value ) tuples around iterable object
print('Enumerate list 1: ')
l1 =[232,4456,567,879,980,1346,658]
for i in enumerate(l1):
print(i)
# Enumerate using tuple unpacking
print('Enumerate list 1 using tuple unpacking: ')
for i,v in enumerate(l1):
print(f"index= {i}, value= {v}")
| 21.022222
| 73
| 0.710359
|
0afc30f7eb353ef67478749d93089830c8c44a6c
| 165
|
py
|
Python
|
setup.py
|
hydrargyrum/sit-tagger
|
cf5c8e7ebad7e97fd695c565586dd9fa342a571f
|
[
"WTFPL"
] | 3
|
2017-12-16T15:52:50.000Z
|
2019-10-15T06:50:14.000Z
|
setup.py
|
hydrargyrum/sit-tagger
|
cf5c8e7ebad7e97fd695c565586dd9fa342a571f
|
[
"WTFPL"
] | null | null | null |
setup.py
|
hydrargyrum/sit-tagger
|
cf5c8e7ebad7e97fd695c565586dd9fa342a571f
|
[
"WTFPL"
] | 2
|
2019-06-05T19:36:58.000Z
|
2020-05-17T13:01:07.000Z
|
#!/usr/bin/env python3
# this project is licensed under the WTFPLv2, see COPYING.txt for details
from setuptools import setup
if __name__ == '__main__':
setup()
| 18.333333
| 73
| 0.745455
|
fd0074e4f8bffdb37d305d9b85952bf85a08bd78
| 2,621
|
py
|
Python
|
Callum/Day16/Packet.py
|
JackDanielHarding/advent-of-code-2021
|
5b860e36b4ac1af205c992763167ffef41a81a1b
|
[
"CC0-1.0"
] | null | null | null |
Callum/Day16/Packet.py
|
JackDanielHarding/advent-of-code-2021
|
5b860e36b4ac1af205c992763167ffef41a81a1b
|
[
"CC0-1.0"
] | null | null | null |
Callum/Day16/Packet.py
|
JackDanielHarding/advent-of-code-2021
|
5b860e36b4ac1af205c992763167ffef41a81a1b
|
[
"CC0-1.0"
] | null | null | null |
from functools import reduce
class Packet():
def __init__(self, bits: str):
self.version = int(bits[0:3], 2)
self.type = int(bits[3:6], 2)
self.subPackets = []
if self.type == 4:
index = 6
val = ""
while True:
firstBit = bits[index]
val += bits[index+1:index+5]
index += 5
if firstBit == '0':
self.literal = int(val, 2)
self.lastIndex = index
break
else:
self.lType = bits[6]
if self.lType == '0':
index = 22
subPacketBits = bits[7:index]
subPacketBitsInt = int(subPacketBits, 2)
parsedBits = 0
while True:
subPacket = Packet(bits[index:index+subPacketBitsInt])
self.subPackets.append(subPacket)
parsedBits += subPacket.lastIndex
index += subPacket.lastIndex
if parsedBits >= subPacketBitsInt:
self.lastIndex = index
break
else:
index = 18
subPackets = int(bits[7:index], 2)
parsedSubPackets = 0
while True:
subPacket = Packet(bits[index:])
self.subPackets.append(subPacket)
parsedSubPackets += 1
index += subPacket.lastIndex
if parsedSubPackets >= subPackets:
self.lastIndex = index
break
def NestedVersionSum(self) -> int:
return self.version + sum(packet.NestedVersionSum() for packet in self.subPackets)
def PacketValue(self) -> int:
subPacketValues = (packet.PacketValue() for packet in self.subPackets)
match self.type:
case 0:
return sum(subPacketValues)
case 1:
return reduce(lambda x, y: x * y, subPacketValues)
case 2:
return min(subPacketValues)
case 3:
return max(subPacketValues)
case 4:
return self.literal
case 5:
return 1 if next(subPacketValues) > next(subPacketValues) else 0
case 6:
return 1 if next(subPacketValues) < next(subPacketValues) else 0
case 7:
return 1 if next(subPacketValues) == next(subPacketValues) else 0
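if __name__ == "__main__":
    # Usage sketch (not part of the original solution): Packet expects a string of
    # '0'/'1' characters, so a hexadecimal transmission must be expanded to bits first.
    # "D2FE28" is the literal-value example from the Advent of Code 2021 day 16 statement.
    hex_input = "D2FE28"
    bits = bin(int(hex_input, 16))[2:].zfill(len(hex_input) * 4)
    packet = Packet(bits)
    print(packet.NestedVersionSum())  # 6 (a single packet with version 6)
    print(packet.PacketValue())       # 2021 (the encoded literal value)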
| 37.442857
| 90
| 0.470813
|
9a6d919ef4475462117e764ebe2500935886da78
| 5,067
|
py
|
Python
|
restkit/handlers/http_user_handlers/query_handler.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
restkit/handlers/http_user_handlers/query_handler.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
restkit/handlers/http_user_handlers/query_handler.py
|
ppolxda/restkit
|
eeb6177ccd75f8ba7b2faa252116f1e745d0f91b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@create: 2019-09-28 18:09:50.
@author: name
@desc: query_handler
"""
from restkit.transactions import trans_func
from restkit.handlers.http_user_conns.http_sessions import HttpSessionBaseHandler # noqa
# from restkit.tools.error_info import _
from restkit.tools.error_info import error_info
class HttpQueryHandler(HttpSessionBaseHandler):
requert_parames_config = {
}
query_sql = None
query_where_sql = None
sum_query_sql = None
sum_query_where_sql = None
history_query_sql = None
history_query_where_sql = None
history_sum_query_sql = None
history_sum_query_where_sql = None
QUERY_TODAY_DEFINE = {}
QUERY_TODAY_SUM_DEFINE = {}
QUERY_HISTORY_DEFINE = {}
QUERY_HISTORY_SUM_DEFINE = {}
# ----------------------------------------------
# today query
# ----------------------------------------------
def permission_where(self):
return {
'userid': self.session().userid
}
def conv_result(self, results):
return results
@trans_func
def get_today_processing(self):
dbtrans = yield self.dbase_begin()
where = self.get_where_parames()
permission = self.permission_where()
if self.query_where_sql:
permission.update(self.query_where_sql)
result = yield self.query_sql.query_data(
dbtrans, where['where'], permission,
where['page'], where['sort']
)
self.write_error_json_raise(
error_info.ERROR_SUCESS,
results=self.conv_result(result) if result is not None else []
)
@trans_func
def get_today_sum_processing(self):
dbtrans = yield self.dbase_begin()
where = self.get_where_parames()
permission = self.permission_where()
if self.sum_query_where_sql:
permission.update(self.sum_query_where_sql)
result = yield self.sum_query_sql.query_data(
dbtrans, where['where'], permission,
where['page'], where['sort']
)
self.write_error_json_raise(
error_info.ERROR_SUCESS,
results=self.conv_result(result) if result is not None else []
)
@trans_func
def post_today_csv_processing(self):
where = self.get_where_parames_from_json()
permission = self.permission_where()
if self.query_where_sql:
permission.update(self.query_where_sql)
sql_str, sql_parames = self.query_sql.query_sql_string(
where['where'], permission,
where['page'], where['sort']
)
self.gsettings.csv_add_task(
self.session_userid_str(),
sql_str, sql_parames,
self.QUERY_TODAY_DEFINE['cnname_define'],
self.QUERY_TODAY_DEFINE['options_define'],
**where['csv']
)
self.write_error_json_raise(error_info.ERROR_SUCESS)
# ----------------------------------------------
# History Query
# ----------------------------------------------
@trans_func
def get_history_processing(self):
dbtrans = yield self.dbase_begin()
where = self.get_where_parames()
permission = self.permission_where()
if self.history_query_where_sql:
permission.update(self.history_query_where_sql)
result = yield self.history_query_sql.query_data(
dbtrans, where['where'], permission,
where['page'], where['sort']
)
self.write_error_json_raise(
error_info.ERROR_SUCESS,
results=self.conv_result(result) if result is not None else []
)
@trans_func
def get_history_sum_processing(self):
dbtrans = yield self.dbase_begin()
where = self.get_where_parames()
permission = self.permission_where()
if self.history_sum_query_where_sql:
permission.update(self.history_sum_query_where_sql)
result = yield self.history_sum_query_sql.query_data(
dbtrans, where['where'], permission,
where['page'], where['sort']
)
self.write_error_json_raise(
error_info.ERROR_SUCESS,
results=self.conv_result(result) if result is not None else []
)
@trans_func
def post_history_csv_processing(self):
where = self.get_where_parames_from_json()
permission = self.permission_where()
if self.history_query_where_sql:
permission.update(self.history_query_where_sql)
sql_str, sql_parames = self.history_query_sql.query_sql_string(
where['where'], permission,
where['page'], where['sort']
)
self.gsettings.csv_add_task(
self.session_userid_str(),
sql_str, sql_parames,
self.QUERY_TODAY_DEFINE['cnname_define'],
self.QUERY_TODAY_DEFINE['options_define'],
**where['csv']
)
self.write_error_json_raise(error_info.ERROR_SUCESS)
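# A hypothetical subclass sketch (illustrative only; the SQL helper objects and the
# column/option defines below do not exist in this module and must be supplied by the
# application):
#   class HttpOrderQueryHandler(HttpQueryHandler):
#       query_sql = ORDER_QUERY_SQL          # object exposing query_data()/query_sql_string()
#       sum_query_sql = ORDER_SUM_QUERY_SQL
#       QUERY_TODAY_DEFINE = {'cnname_define': {}, 'options_define': {}}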
| 28.789773
| 89
| 0.609236
|
f991ad41b3417dc64b3f31cfdff92940f084ea74
| 14,359
|
py
|
Python
|
xcdat/dataset.py
|
tomvothecoder/xcdat
|
21dad38d20baffe1db097cd898a9b7fcf2470955
|
[
"Apache-2.0"
] | 3
|
2021-05-11T22:06:09.000Z
|
2021-06-22T15:58:56.000Z
|
xcdat/dataset.py
|
tomvothecoder/xcdat
|
21dad38d20baffe1db097cd898a9b7fcf2470955
|
[
"Apache-2.0"
] | 78
|
2021-04-07T17:13:26.000Z
|
2021-09-02T22:28:27.000Z
|
xcdat/dataset.py
|
tomvothecoder/xcdat
|
21dad38d20baffe1db097cd898a9b7fcf2470955
|
[
"Apache-2.0"
] | null | null | null |
"""Dataset module for functions related to an xarray.Dataset."""
from typing import Any, Dict, Hashable, List, Optional, Union
import pandas as pd
import xarray as xr
from xcdat import bounds # noqa: F401
from xcdat.logger import setup_custom_logger
logger = setup_custom_logger(__name__)
def open_dataset(
path: str, data_var: Optional[str] = None, **kwargs: Dict[str, Any]
) -> xr.Dataset:
"""Wrapper for ``xarray.open_dataset()`` that applies common operations.
Operations include:
- If the dataset has a time dimension, decode both CF and non-CF time units.
- Generate bounds for supported coordinates if they don't exist.
- Option to limit the Dataset to a single regular (non-bounds) data
variable while retaining any bounds data variables.
Parameters
----------
path : str
Path to Dataset.
data_var: Optional[str], optional
The key of the data variable to keep in the Dataset, by default None.
kwargs : Dict[str, Any]
Additional arguments passed on to ``xarray.open_dataset``.
- Visit the xarray docs for accepted arguments [1]_.
- ``decode_times`` defaults to ``False`` to allow for the manual
decoding of non-CF time units.
Returns
-------
xr.Dataset
Dataset after applying operations.
Notes
-----
``xarray.open_dataset`` opens the file with read-only access. When you
modify values of a Dataset, even one linked to files on disk, only the
in-memory copy you are manipulating in xarray is modified: the original file
on disk is never touched.
References
----------
.. [1] https://xarray.pydata.org/en/stable/generated/xarray.open_dataset.html
Examples
--------
Import and call module:
>>> from xcdat.dataset import open_dataset
>>> ds = open_dataset("file_path")
    Keep a single regular (non-bounds) data variable in the Dataset:
    >>> from xcdat.dataset import open_dataset
    >>> ds = open_dataset("file_path", data_var="tas")
    Any bounds data variables are always retained alongside ``data_var``.
"""
# NOTE: Using decode_times=False may add incorrect units for existing time
# bounds (becomes "days since 1970-01-01 00:00:00").
ds = xr.open_dataset(path, decode_times=False, **kwargs)
ds = infer_or_keep_var(ds, data_var)
if ds.cf.dims.get("T") is not None:
ds = decode_time_units(ds)
ds = ds.bounds.fill_missing()
return ds
def open_mfdataset(
paths: Union[str, List[str]],
data_var: Optional[str] = None,
**kwargs: Dict[str, Any],
) -> xr.Dataset:
"""Wrapper for ``xarray.open_mfdataset()`` that applies common operations.
Operations include:
- If the dataset has a time dimension, decode both CF and non-CF time units.
- Generate bounds for supported coordinates if they don't exist.
- Option to limit the Dataset to a single regular (non-bounds) data
variable while retaining any bounds data variables.
Parameters
----------
    paths : Union[str, List[str]]
Either a string glob in the form ``"path/to/my/files/*.nc"`` or an
explicit list of files to open. Paths can be given as strings or as
pathlib Paths. If concatenation along more than one dimension is desired,
then ``paths`` must be a nested list-of-lists (see ``combine_nested``
for details). (A string glob will be expanded to a 1-dimensional list.)
data_var: Optional[str], optional
The key of the data variable to keep in the Dataset, by default None.
kwargs : Dict[str, Any]
Additional arguments passed on to ``xarray.open_mfdataset`` and/or
``xarray.open_dataset``.
- Visit the xarray docs for accepted arguments, [2]_ and [3]_.
- ``decode_times`` defaults to ``False`` to allow for the manual
decoding of non-CF time units.
Returns
-------
xr.Dataset
Dataset after applying operations.
Notes
-----
``xarray.open_mfdataset`` opens the file with read-only access. When you
modify values of a Dataset, even one linked to files on disk, only the
in-memory copy you are manipulating in xarray is modified: the original file
on disk is never touched.
References
----------
.. [2] https://xarray.pydata.org/en/stable/generated/xarray.open_mfdataset.html
.. [3] https://xarray.pydata.org/en/stable/generated/xarray.open_dataset.html
Examples
--------
Import and call module:
>>> from xcdat.dataset import open_mfdataset
>>> ds = open_mfdataset(["file_path1", "file_path2"])
    Keep a single regular (non-bounds) data variable in the Dataset:
    >>> from xcdat.dataset import open_mfdataset
    >>> ds = open_mfdataset(["file_path1", "file_path2"], data_var="tas")
    Any bounds data variables are always retained alongside ``data_var``.
"""
# NOTE: Using decode_times=False may add incorrect units for existing time
# bounds (becomes "days since 1970-01-01 00:00:00").
ds = xr.open_mfdataset(paths, decode_times=False, **kwargs)
ds = infer_or_keep_var(ds, data_var)
if ds.cf.dims.get("T") is not None:
ds = decode_time_units(ds)
ds = ds.bounds.fill_missing()
return ds
def infer_or_keep_var(dataset: xr.Dataset, data_var: Optional[str]) -> xr.Dataset:
"""Infer the data variable(s) or keep a specific one in the Dataset.
If ``data_var`` is None, then this function checks the number of
regular (non-bounds) data variables in the Dataset. If there is a single
regular data var, then it will add an 'xcdat_infer' attr pointing to it in
the Dataset. XCDAT APIs can then call `get_inferred_var()` to get the data
var linked to the 'xcdat_infer' attr. If there are multiple regular data
variables, the 'xcdat_infer' attr is not set and the Dataset is returned
as is.
If ``data_var`` is not None, then this function checks if the ``data_var``
exists in the Dataset and if it is a regular data var. If those checks pass,
it will subset the Dataset to retain that ``data_var`` and all bounds data
vars. An 'xcdat_infer' attr pointing to the ``data_var`` is also added
to the Dataset.
This utility function is useful for designing XCDAT APIs with an optional
``data_var`` kwarg. If ``data_var`` is None, an inference to the desired
data var is performed with a call to this function. Otherwise, perform the
API operation explicitly on ``data_var``.
Parameters
----------
dataset : xr.Dataset
The Dataset.
data_var: Optional[str], optional
The key of the data variable to keep in the Dataset.
Returns
-------
xr.Dataset
The Dataset.
Raises
------
KeyError
If the specified data variable is not found in the Dataset.
KeyError
If the user specifies a bounds variable to keep.
"""
ds = dataset.copy()
# Make sure the "xcdat_infer" attr is None because a Dataset may be written
# with this attr already set.
ds.attrs["xcdat_infer"] = None
all_vars = ds.data_vars.keys()
bounds_vars = ds.bounds.names
regular_vars: List[Hashable] = list(set(all_vars) ^ set(bounds_vars))
if len(regular_vars) == 0:
logger.warning("This dataset only contains bounds data variables.")
if data_var is None:
if len(regular_vars) == 1:
ds.attrs["xcdat_infer"] = regular_vars[0]
elif len(regular_vars) > 1:
regular_vars_str = ", ".join(
f"'{var}'" for var in sorted(regular_vars) # type:ignore
)
logger.info(
"This dataset contains more than one regular data variable "
f"({regular_vars_str}). If desired, pass the `data_var` kwarg to "
"reduce down to one regular data var."
)
if data_var is not None:
if data_var not in all_vars:
raise KeyError(
f"The data variable '{data_var}' does not exist in the dataset."
)
if data_var in bounds_vars:
raise KeyError("Please specify a regular (non-bounds) data variable.")
ds = dataset[[data_var] + bounds_vars]
ds.attrs["xcdat_infer"] = data_var
return ds
def decode_time_units(dataset: xr.Dataset):
"""Decodes both CF and non-CF compliant time units.
``xarray`` uses the ``cftime`` module, which only supports CF compliant
time units [4]_. As a result, opening datasets with non-CF compliant
time units (months and years) will throw an error if ``decode_times=True``.
This function works around this issue by first checking if the time units
are CF or non-CF compliant. Datasets with CF compliant time units are passed
to ``xarray.decode_cf``. Datasets with non-CF compliant time units are
manually decoded by extracting the units and reference date, which are used
to generate an array of datetime values.
Parameters
----------
dataset : xr.Dataset
Dataset with non-decoded CF/non-CF compliant time units.
Returns
-------
xr.Dataset
Dataset with decoded time units.
Notes
-----
.. [4] https://unidata.github.io/cftime/api.html#cftime.num2date
Examples
--------
Decode non-CF compliant time units in a Dataset:
>>> from xcdat.dataset import decode_time_units
>>> ds = xr.open_dataset("file_path", decode_times=False)
>>> ds.time
<xarray.DataArray 'time' (time: 3)>
array([0, 1, 2])
Coordinates:
* time (time) int64 0 1 2
Attributes:
units: years since 2000-01-01
bounds: time_bnds
axis: T
long_name: time
standard_name: time
>>> ds = decode_time_units(ds)
>>> ds.time
<xarray.DataArray 'time' (time: 3)>
array(['2000-01-01T00:00:00.000000000', '2001-01-01T00:00:00.000000000',
'2002-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2001-01-01 2002-01-01
Attributes:
units: years since 2000-01-01
bounds: time_bnds
axis: T
long_name: time
standard_name: time
View time coordinate encoding information:
>>> ds.time.encoding
{'source': None, 'dtype': dtype('int64'), 'original_shape': (3,), 'units':
'years since 2000-01-01', 'calendar': 'proleptic_gregorian'}
"""
time = dataset["time"]
units_attr = time.attrs.get("units")
if units_attr is None:
raise KeyError(
"No 'units' attribute found for time coordinate. Make sure to open "
"the dataset with `decode_times=False`."
)
units, reference_date = units_attr.split(" since ")
non_cf_units_to_freq = {"months": "MS", "years": "YS"}
cf_compliant = units not in non_cf_units_to_freq.keys()
if cf_compliant:
dataset = xr.decode_cf(dataset, decode_times=True)
else:
# NOTE: The "calendar" attribute for units consisting of "months" or
# "years" is not factored when generating date ranges. The number of
# days in a month is not factored.
decoded_time = xr.DataArray(
data=pd.date_range(
start=reference_date,
periods=time.size,
freq=non_cf_units_to_freq[units],
),
dims=["time"],
attrs=dataset["time"].attrs,
)
decoded_time.encoding = {
"source": dataset.encoding.get("source"),
"dtype": time.dtype,
"original_shape": decoded_time.shape,
"units": units_attr,
# pandas.date_range() returns "proleptic_gregorian" by default
"calendar": "proleptic_gregorian",
}
dataset = dataset.assign_coords({"time": decoded_time})
return dataset
def get_inferred_var(dataset: xr.Dataset) -> xr.DataArray:
"""Gets the inferred data variable that is tagged in the Dataset.
This function looks for the "xcdat_infer" attribute pointing
to the desired data var in the Dataset, which can be set through
``xcdat.open_dataset()``, ``xcdat.open_mf_dataset()``, or manually.
This utility function is useful for designing XCDAT APIs with an optional
``data_var`` kwarg. If ``data_var`` is None, an inference to the desired
data var is performed with a call to this function. Otherwise, perform the
API operation explicitly on ``data_var``.
Parameters
----------
dataset : xr.Dataset
The Dataset.
Returns
-------
xr.DataArray
The inferred data variable.
Raises
------
KeyError
If the 'xcdat_infer' attr is not set in the Dataset.
KeyError
If the 'xcdat_infer' attr points to a non-existent data var.
KeyError
If the 'xcdat_infer' attr points to a bounds data var.
"""
inferred_var = dataset.attrs.get("xcdat_infer", None)
bounds_vars = dataset.bounds.names
if inferred_var is None:
raise KeyError(
"Dataset attr 'xcdat_infer' is not set so the desired data variable "
"cannot be inferred. You must pass the `data_var` kwarg to this operation."
)
else:
data_var = dataset.get(inferred_var, None)
if data_var is None:
raise KeyError(
"Dataset attr 'xcdat_infer' is set to non-existent data variable, "
f"'{inferred_var}'. Either pass the `data_var` kwarg to this operation, "
"or set 'xcdat_infer' to a regular (non-bounds) data variable."
)
if inferred_var in bounds_vars:
raise KeyError(
"Dataset attr `xcdat_infer` is set to the bounds data variable, "
f"'{inferred_var}'. Either pass the `data_var` kwarg, or set "
"'xcdat_infer' to a regular (non-bounds) data variable."
)
logger.info(
f"The data variable '{data_var.name}' was inferred from the Dataset attr "
"'xcdat_infer' for this operation."
)
return data_var.copy()
| 35.280098
| 89
| 0.639669
|
7e1fba544a654e0383d84db7715ac4577b9d3593
| 4,039
|
py
|
Python
|
mi_codigo.py
|
oscarordaz27/TheIoTLearningInitiative
|
54678a38d5b58d4f41c839d133ed3c4dc1cd6025
|
[
"Apache-2.0"
] | null | null | null |
mi_codigo.py
|
oscarordaz27/TheIoTLearningInitiative
|
54678a38d5b58d4f41c839d133ed3c4dc1cd6025
|
[
"Apache-2.0"
] | null | null | null |
mi_codigo.py
|
oscarordaz27/TheIoTLearningInitiative
|
54678a38d5b58d4f41c839d133ed3c4dc1cd6025
|
[
"Apache-2.0"
] | null | null | null |
##*****************************************************************************
## Copyright (c) 2014 IBM Corporation and other Contributors.
##
## All rights reserved. This program and the accompanying materials
## are made available under the terms of the Eclipse Public License v1.0
## which accompanies this distribution, and is available at
## http://www.eclipse.org/legal/epl-v10.html
##
## Contributors:
## IBM - Initial Contribution
##*****************************************************************************
## IoT Foundation QuickStart Driver
## A sample IBM Internet of Things Foundation Service client for Intel Internet of Things Gateway Solutions
import time
import client as mqtt
import json
import uuid
import sys
import pyupm_grove as grove
import pyupm_ttp223 as ttp223
# create the button object using GPIO pin 0
button = grove.GroveButton(8)
# create the TTP223 touch sensor object using GPIO pin 0
touch = ttp223.TTP223(7)
count = 0
#Class for retrieving CPU % utilisation
class CPUutil(object):
def __init__(self):
self.prev_idle = 0
self.prev_total = 0
self.new_idle = 0
self.new_total = 0
def get(self):
self.read()
delta_idle = self.new_idle - self.prev_idle
delta_total = self.new_total - self.prev_total
cpuut = 0.0
if (self.prev_total != 0) and (delta_total != 0):
cpuut = ((delta_total - delta_idle) * 100.0 / delta_total)
return cpuut
def read(self):
self.prev_idle = self.new_idle
self.prev_total = self.new_total
self.new_idle = 0;
self.new_total = 0;
with open('/proc/stat') as f:
line = f.readline()
parts = line.split()
if len(parts) >= 5:
self.new_idle = int(parts[4])
for part in parts[1:]:
self.new_total += int(part)
#Initialise class to retrieve CPU Usage
cpuutil = CPUutil()
macAddress = hex(uuid.getnode())[2:-1]
macAddress = format(long(macAddress, 16),'012x')
#remind the user of the mac address further down in code (post 'connecting to QS')
#Set the variables for connecting to the Quickstart service
organization = "quickstart"
deviceType = "iotsample-gateway"
broker = ""
topic = "iot-2/evt/status/fmt/json"
username = ""
password = ""
error_to_catch = getattr(__builtins__,'FileNotFoundError', IOError)
try:
file_object = open("./device.cfg")
for line in file_object:
readType, readValue = line.split("=")
if readType == "org":
organization = readValue.strip()
elif readType == "type":
deviceType = readValue.strip()
elif readType == "id":
macAddress = readValue.strip()
elif readType == "auth-method":
username = "use-token-auth"
elif readType == "auth-token":
password = readValue.strip()
else:
print("please check the format of your config file") #will want to repeat this error further down if their connection fails?
file_object.close()
print("Configuration file found - connecting to the registered service")
except error_to_catch:
print("No config file found, connecting to the Quickstart service")
print("MAC address: " + macAddress)
#Creating the client connection
#Set clientID and broker
clientID = "d:" + organization + ":" + deviceType + ":" + macAddress
broker = organization + ".messaging.internetofthings.ibmcloud.com"
mqttc = mqtt.Client(clientID)
#Set authentication values, if connecting to registered service
if username != "":  # compare by value; "is not" checks identity and is unreliable for strings
mqttc.username_pw_set(username, password=password)
mqttc.connect(host=broker, port=1883, keepalive=60)
#Publishing to IBM Internet of Things Foundation
mqttc.loop_start()
while mqttc.loop() == 0:
cpuutilvalue = cpuutil.get()
print cpuutilvalue
if button.value():
count = count + 1
if touch.isPressed():
count = count - 1
msg = json.JSONEncoder().encode({"d":{"cpuutil":cpuutilvalue,"contador":count}})
mqttc.publish(topic, payload=msg, qos=0, retain=False)
print "message published"
time.sleep(1)
pass
| 28.048611
| 130
| 0.661302
|
8ef85708ab32d77d250dc4442a1b579f02e718b3
| 498
|
py
|
Python
|
Tests/get_url_test.py
|
by09115/Flask-URLshortener
|
860eef75d86658f91a9316c253b512bf3aad0a6c
|
[
"MIT"
] | 1
|
2019-01-02T08:50:07.000Z
|
2019-01-02T08:50:07.000Z
|
Tests/get_url_test.py
|
by09115/Flask-URLshortener
|
860eef75d86658f91a9316c253b512bf3aad0a6c
|
[
"MIT"
] | null | null | null |
Tests/get_url_test.py
|
by09115/Flask-URLshortener
|
860eef75d86658f91a9316c253b512bf3aad0a6c
|
[
"MIT"
] | null | null | null |
from Tests import TestCaseBase, check_status_code
class GetUrlTest(TestCaseBase):
def setUp(self):
super(GetUrlTest, self).setUp()
self.short_url = self.save_url_request()
@check_status_code(302)
def test_success_get_url(self):
rv = self.get_url_request('b')
        self.assertEqual(rv.headers['location'], 'http://blog.jaehoon.kim')
return rv
@check_status_code(204)
def test_wrong_url(self):
return self.get_url_request('Pizza')
| 26.210526
| 76
| 0.680723
|
45ef0262e7c7912d35d44c757f2c32841d27c4d0
| 3,806
|
py
|
Python
|
idrac_memory/idrac_memory.py
|
anita-mithran/plugins
|
3c34f4b92b9c0ae985dc3b6abfb8c952aba00cd7
|
[
"BSD-2-Clause"
] | null | null | null |
idrac_memory/idrac_memory.py
|
anita-mithran/plugins
|
3c34f4b92b9c0ae985dc3b6abfb8c952aba00cd7
|
[
"BSD-2-Clause"
] | null | null | null |
idrac_memory/idrac_memory.py
|
anita-mithran/plugins
|
3c34f4b92b9c0ae985dc3b6abfb8c952aba00cd7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import json
import SNMPUtil
### Monitoring iDRAC Servers - Memory Performance
### It uses the snmpwalk command to get the hardware data from the iDRAC Servers.
### SNMPUtil.py is used to get the raw SNMP data, which is then parsed to build the output json
### Download and install the latest version of Site24x7 Linux Agent. The agent will execute the plugin and push the data to the Site24x7 server
###
### Author: Anita, Zoho Corp
### Language : Python
### Tested in Ubuntu
### Tested for snmp version 2c
### iDRAC Server Configuration Details
HOST = 'IDRAC_SERVER'
VERSION = '2c'
COMMUNITY = 'public'
MIB = 'MIB LOCATION'
### OIDS for Getting Memory Details
OIDS = {'memory' : ['memoryDeviceTable']}
### OID Attributes
hardware = {'memory' : ['memoryDeviceStateSettings','memoryDeviceStatus','memoryDeviceType','memoryDeviceSize','memoryDeviceSpeed']}
### Output Keys and their units
names = {'memory' : ['state','status','type',{'size': 'KB'},{'speed':'nanosec'}]}
class HardwareParser:
def __init__(self):
self.hardware = ''
self.oids = ''
self.pattern = ''
def getData(self):
output_data = {}
output_data['data'] = {}
output_data['units'] = {}
for _ in OIDS:
self.hardware = _
self.oids = OIDS[self.hardware]
for _ in self.oids:
### SNMPUtil module is used to get the snmp output for the input OIDS
snmpdata = SNMPUtil.SNMPPARSER('snmpwalk',HOST,VERSION,COMMUNITY,_,MIB,hardware[self.hardware])
### get Raw SNMP Output as a dict
self.snmp_data = snmpdata.getRawData()
### Method to parse the SNMP command output data
output_data = self.parseSNMPData(output_data)
return output_data
### Method to parse the SNMP command output data
def parseSNMPData(self,output_data):
jsondata = output_data['data']
unitdata = output_data['units']
for _ in self.snmp_data:
for index, __ in enumerate(hardware[self.hardware]) :
if __ in _:
name = ''.join(_.split("::")[1:]).replace('"','').split(' ')[0].split('.')
elementname = name[len(name)-1] # Name
value = ''.join(_.split()[1:]).replace('"','') # Value
if ':' in value:
val = value.split(':')[1:]
value = val[len(val)-1]
elem = names[self.hardware][index]
attribute = '' # Attribute Name
unit = '' # Attribute Value
if type(elem) is str: # Attributes with no units specified
attribute = elem
elif type(elem) is dict: # Attributes with units
attribute = list(elem.keys())[0]
unit = elem[list(elem.keys())[0]]
key = (attribute +'_'+elementname).replace(' ','')
jsondata[key] = value
if unit!='':
unitdata[key] = unit
output_data['data'] = jsondata
output_data['units'] = unitdata
return output_data
if __name__ == '__main__':
parser = HardwareParser()
result = {}
try:
output = parser.getData()
result = output['data']
result['units'] = output['units']
except ValueError as e:
result['msg'] = str(e)
print(json.dumps(result, indent=2, sort_keys=True))
| 34.6
| 143
| 0.522596
|
7366e295fe0670dd7b1f59261e70cf1c99a9395d
| 3,777
|
py
|
Python
|
code/src/nuvla/api/resources/data.py
|
nuvla/python-api
|
7b530aa049eee8c8cd654c27d749d46bf0d19e87
|
[
"Apache-2.0"
] | 4
|
2019-04-27T10:35:44.000Z
|
2019-05-05T13:04:28.000Z
|
code/src/nuvla/api/resources/data.py
|
nuvla/python-library
|
421abe6f583e1ce6a48670131faefe16b7e0bc12
|
[
"Apache-2.0"
] | 21
|
2019-02-22T07:30:41.000Z
|
2022-03-30T13:27:55.000Z
|
code/src/nuvla/api/resources/data.py
|
nuvla/python-library
|
421abe6f583e1ce6a48670131faefe16b7e0bc12
|
[
"Apache-2.0"
] | null | null | null |
import os
import requests
from typing import Optional, Union
from nuvla.api import Api as Nuvla
from .base import ResourceBase
class DataRecord(ResourceBase):
resource = 'data-record'
def create(self, data: dict, infra_service_id: str):
if isinstance(data, dict):
data.update({'infrastructure-service': infra_service_id})
return self.add(data)
class DataObjectS3(ResourceBase):
resource = 'data-object'
def __init__(self, nuvla: Nuvla):
super().__init__(nuvla)
def create(self, content: Union[str, bytes], bucket, object_path, s3_cred_id,
content_type='text/plain', name=None, description=None,
tags: Optional[list]=None, md5sum: Optional[str]=None) -> str:
"""Stores `content` in S3 defined by `s3_cred_id` and registers the
object as data-object in Nuvla. Returns data-object resource ID.
`content` and `content_type` should match (e.g. str and plain/text,
bytes and image/png).
"""
doc = {
"template": {
"name": name or object_path,
"description": description or name or object_path,
"credential": s3_cred_id,
"subtype": "generic",
"resource-type": "data-object-template",
"content-type": content_type,
"object": object_path,
"bucket": bucket,
"bytes": len(content),
"href": "data-object-template/generic"
}
}
if tags:
doc["template"].update({'tags': tags})
if md5sum:
doc["template"].update({'md5sum': md5sum})
data_object_id = self.add(doc)
# Upload data.
data_object = self.nuvla.get(data_object_id)
response = self.nuvla.operation(data_object, "upload")
upload_url = response.data['uri']
headers = {"content-type": content_type}
response = requests.put(upload_url, data=content, headers=headers)
response.raise_for_status()
# Set object is ready.
data_object = self.nuvla.get(data_object_id)
self.nuvla.operation(data_object, "ready")
return data_object_id
def get_content(self, object_id) -> Union[str, bytes]:
"""Returns string or bytes array by downloading from S3 the object
identified by `object_id`. They type of the returned object
corresponds to the `content-type` of the object (text or binary).
"""
data_object = self.nuvla.get(object_id)
response = self.nuvla.operation(data_object, 'download')
download_url = response.data['uri']
content_type = data_object.data['content-type']
headers = {'content-type': content_type}
response = requests.get(download_url, headers=headers)
return response.content
def get_to_file(self, object_id, filename=''):
"""Downloads from S3 and stores to a file the content of the S3 object
identified by `object_id`. If `filename` is not given, the base name
of 'object' attribute is taken instead.
"""
content = self.get_content(object_id)
if not filename:
data_object = self.nuvla.get(object_id)
filename = os.path.basename(data_object.data['object'])
if isinstance(content, bytes):
fmode = 'wb'
else:
fmode = 'w'
with open(filename, fmode) as fh:
fh.write(content)
def delete(self, resource_id) -> str:
"""Deletes object from S3 and its record from Nuvla identified by
`resource_id`. Returns ID of the deleted object.
"""
return super().delete(resource_id)
class DataSet:
pass
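# Usage sketch (illustrative only, not part of the original module). It assumes an
# already authenticated `nuvla` client (nuvla.api.Api) plus an existing S3 credential
# id and bucket; every identifier below is a hypothetical placeholder:
#   s3 = DataObjectS3(nuvla)
#   object_id = s3.create("hello world",
#                         bucket="my-bucket",
#                         object_path="examples/hello.txt",
#                         s3_cred_id="credential/aaaa-bbbb-cccc")
#   print(s3.get_content(object_id))
#   s3.delete(object_id)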
| 35.632075
| 81
| 0.608155
|
76713d714d0f442dbd2799332d27874779e6feb8
| 752
|
py
|
Python
|
data/train/python/76713d714d0f442dbd2799332d27874779e6feb8urls.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/76713d714d0f442dbd2799332d27874779e6feb8urls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/76713d714d0f442dbd2799332d27874779e6feb8urls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from django.conf.urls import include, url
from tastypie.api import Api
import views
from resources import *
api_version = 'v1'
v1_api = Api(api_name=api_version)
v1_api.register(UserResource())
v1_api.register(DeviceResource())
v1_api.register(JourneyResource())
v1_api.register(PositionResource())
v1_api.register(LogResource())
urlpatterns = [
url(r'^api/', include(v1_api.urls)),
url(r'^api/{}/fleet/((?P<id>[0-9]+|)/)?$'.format(api_version), views.FleetView.as_view()),
url(r'^api/{}/fleet/(?P<id>[0-9]+)/user/(?P<email>[\w.@+-]+|)/$'.format(api_version),
views.FleetUserView.as_view()),
url(r'^api/{}/fleet/(?P<id>[0-9]+)/device/(?P<email>[\w.@+-]+|)/$'.format(api_version),
views.FleetDeviceView.as_view()),
]
| 31.333333
| 94
| 0.666223
|
4f5f71825652216f68e4951cc739e5a41fb963bb
| 567
|
py
|
Python
|
backend/documentapp/migrations/0002_alter_document_profile_friend.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 15
|
2020-12-23T13:56:49.000Z
|
2021-12-10T11:04:23.000Z
|
backend/documentapp/migrations/0002_alter_document_profile_friend.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 41
|
2021-03-19T07:51:48.000Z
|
2021-11-22T09:45:46.000Z
|
backend/documentapp/migrations/0002_alter_document_profile_friend.py
|
Lenend-KPU/LBS-Platform
|
75ba24db8969248e74e9d974638977de1c0bc36a
|
[
"MIT"
] | 3
|
2021-03-24T15:18:24.000Z
|
2021-09-11T14:51:35.000Z
|
# Generated by Django 3.2 on 2021-05-05 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profileapp', '0001_initial'),
('documentapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='document',
name='profile_friend',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='document_profile_friend', to='profileapp.profile'),
),
]
| 27
| 157
| 0.656085
|
4a8d1ef899b0a950d4969725d30390e40a0afdc4
| 11,693
|
py
|
Python
|
tests/integration/ip_messaging/v1/service/test_channel.py
|
rekhafriesland/WhatsappSFDC
|
bd848e9ded19b9ab5e0eb714789e8f5d80fec8b4
|
[
"MIT"
] | null | null | null |
tests/integration/ip_messaging/v1/service/test_channel.py
|
rekhafriesland/WhatsappSFDC
|
bd848e9ded19b9ab5e0eb714789e8f5d80fec8b4
|
[
"MIT"
] | null | null | null |
tests/integration/ip_messaging/v1/service/test_channel.py
|
rekhafriesland/WhatsappSFDC
|
bd848e9ded19b9ab5e0eb714789e8f5d80fec8b4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ChannelTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.create()
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.create()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.holodeck.assert_has_request(Request(
'get',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"channels": [],
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"previous_page_url": null,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels?PageSize=50&Page=0",
"next_page_url": null,
"key": "channels"
}
}
'''
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://ip-messaging.twilio.com/v1/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Channels/CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "friendly_name",
"unique_name": "unique_name",
"attributes": "{ \\"foo\\": \\"bar\\" }",
"type": "public",
"date_created": "2015-12-16T22:18:37Z",
"date_updated": "2015-12-16T22:18:37Z",
"created_by": "system",
"members_count": 0,
"messages_count": 0,
"url": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"links": {
"members": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members",
"messages": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages",
"invites": "https://chat.twilio.com/v1/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Channels/CHaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Invites",
"last_message": null
}
}
'''
))
actual = self.client.ip_messaging.v1.services(sid="ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.channels(sid="CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
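# Editorial note (not part of the generated test suite): `holodeck` is the mocked HTTP
# transport these integration tests run against; mock() queues the next canned Response and
# assert_has_request() checks the request the client constructed, so no real network call is
# made. A hedged sketch of the equivalent live call (the SIDs and token are placeholders):
#
#   from twilio.rest import Client
#   client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
#   channel = client.ip_messaging.v1.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
#                   .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
#   print(channel.friendly_name)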
| 45.675781 | 166 | 0.570683 |
f04b57144bbc7c3e5f50aaac4b105588bd18011a | 9,668 | py | Python | components/isceobj/TopsProc/runDenseOffsets.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | ["ECL-2.0", "Apache-2.0"] | 1 | 2019-09-15T08:10:52.000Z | 2019-09-15T08:10:52.000Z | components/isceobj/TopsProc/runDenseOffsets.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | ["ECL-2.0", "Apache-2.0"] | null | null | null | components/isceobj/TopsProc/runDenseOffsets.py | earthobservatory/isce2 | 655c46cc4add275879167b750a5e91f6d00f168e | ["ECL-2.0", "Apache-2.0"] | 2 | 2021-06-24T20:20:18.000Z | 2021-06-24T20:32:23.000Z |
#
# Author: Joshua Cohen
# Copyright 2016
# Based on Piyush Agram's denseOffsets.py script
#
import os
import isce
import isceobj
import logging
from isceobj.Util.decorators import use_api
logger = logging.getLogger('isce.insar.DenseOffsets')
def runDenseOffsets(self):
'''
Run CPU / GPU version depending on user choice and availability.
'''
if not self.doDenseOffsets:
print('Dense offsets not requested. Skipping ....')
return
hasGPU = self.useGPU and self._insar.hasGPU()
if hasGPU:
runDenseOffsetsGPU(self)
else:
runDenseOffsetsCPU(self)
@use_api
def runDenseOffsetsCPU(self):
'''
Estimate dense offset field between merged master bursts and slave bursts.
'''
from mroipac.ampcor.DenseAmpcor import DenseAmpcor
os.environ['VRT_SHARED_SOURCE'] = "0"
print('\n============================================================')
print('Configuring DenseAmpcor object for processing...\n')
### Determine appropriate filenames
mf = 'master.slc'
sf = 'slave.slc'
if not ((self.numberRangeLooks == 1) and (self.numberAzimuthLooks==1)):
mf += '.full'
sf += '.full'
master = os.path.join(self._insar.mergedDirname, mf)
slave = os.path.join(self._insar.mergedDirname, sf)
####For this module currently, we need to create an actual file on disk
for infile in [master,slave]:
if os.path.isfile(infile):
continue
cmd = 'gdal_translate -of ENVI {0}.vrt {0}'.format(infile)
status = os.system(cmd)
if status:
raise Exception('Command "{0}" failed with exit status {1}'.format(cmd, status))
### Load the master object
m = isceobj.createSlcImage()
m.load(master + '.xml')
m.setAccessMode('READ')
# m.createImage()
### Load the slave object
s = isceobj.createSlcImage()
s.load(slave + '.xml')
s.setAccessMode('READ')
# s.createImage()
width = m.getWidth()
length = m.getLength()
objOffset = DenseAmpcor(name='dense')
objOffset.configure()
# objOffset.numberThreads = 1
### Configure dense Ampcor object
print('\nMaster frame: %s' % (mf))
print('Slave frame: %s' % (sf))
print('Main window size width: %d' % (self.winwidth))
print('Main window size height: %d' % (self.winhgt))
print('Search window size width: %d' % (self.srcwidth))
print('Search window size height: %d' % (self.srchgt))
print('Skip sample across: %d' % (self.skipwidth))
print('Skip sample down: %d' % (self.skiphgt))
print('Field margin: %d' % (self.margin))
print('Oversampling factor: %d' % (self.oversample))
print('Gross offset across: %d' % (self.rgshift))
print('Gross offset down: %d\n' % (self.azshift))
objOffset.setWindowSizeWidth(self.winwidth)
objOffset.setWindowSizeHeight(self.winhgt)
objOffset.setSearchWindowSizeWidth(self.srcwidth)
objOffset.setSearchWindowSizeHeight(self.srchgt)
objOffset.skipSampleAcross = self.skipwidth
objOffset.skipSampleDown = self.skiphgt
objOffset.oversamplingFactor = self.oversample
objOffset.setAcrossGrossOffset(self.rgshift)
objOffset.setDownGrossOffset(self.azshift)
objOffset.setFirstPRF(1.0)
objOffset.setSecondPRF(1.0)
if m.dataType.startswith('C'):
objOffset.setImageDataType1('mag')
else:
objOffset.setImageDataType1('real')
if s.dataType.startswith('C'):
objOffset.setImageDataType2('mag')
else:
objOffset.setImageDataType2('real')
objOffset.offsetImageName = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
objOffset.snrImageName = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
objOffset.covImageName = os.path.join(self._insar.mergedDirname, self._insar.covfile)
print('Output dense offsets file name: %s' % (objOffset.offsetImageName))
print('Output SNR file name: %s' % (objOffset.snrImageName))
print('Output covariance file name: %s' % (objOffset.covImageName))
print('\n======================================')
print('Running dense ampcor...')
print('======================================\n')
objOffset.denseampcor(m, s) ### Where the magic happens...
### Store params for later
self._insar.offset_width = objOffset.offsetCols
self._insar.offset_length = objOffset.offsetLines
self._insar.offset_top = objOffset.locationDown[0][0]
self._insar.offset_left = objOffset.locationAcross[0][0]
def runDenseOffsetsGPU(self):
'''
Estimate dense offset field between merged master bursts and slave bursts.
'''
from contrib.PyCuAmpcor import PyCuAmpcor
print('\n============================================================')
print('Configuring PyCuAmpcor object for processing...\n')
### Determine appropriate filenames
mf = 'master.slc'
sf = 'slave.slc'
if not ((self.numberRangeLooks == 1) and (self.numberAzimuthLooks==1)):
mf += '.full'
sf += '.full'
master = os.path.join(self._insar.mergedDirname, mf)
slave = os.path.join(self._insar.mergedDirname, sf)
####For this module currently, we need to create an actual file on disk
for infile in [master,slave]:
if os.path.isfile(infile):
continue
cmd = 'gdal_translate -of ENVI {0}.vrt {0}'.format(infile)
status = os.system(cmd)
if status:
raise Exception('Command "{0}" failed with exit status {1}'.format(cmd, status))
### Load the master object
m = isceobj.createSlcImage()
m.load(master + '.xml')
m.setAccessMode('READ')
# m.createImage()
### Load the slave object
s = isceobj.createSlcImage()
s.load(slave + '.xml')
s.setAccessMode('READ')
# s.createImage()
width = m.getWidth()
length = m.getLength()
objOffset = PyCuAmpcor.PyCuAmpcor()
objOffset.algorithm = 0
objOffset.deviceID = -1
objOffset.nStreams = 2
objOffset.derampMethod = 0
objOffset.masterImageName = master
objOffset.masterImageHeight = length
objOffset.masterImageWidth = width
objOffset.slaveImageName = slave
objOffset.slaveImageHeight = length
objOffset.slaveImageWidth = width
objOffset.numberWindowDown = (length-100-self.winhgt)//self.skiphgt
objOffset.numberWindowAcross = (width-100-self.winwidth)//self.skipwidth
objOffset.windowSizeHeight = self.winhgt
objOffset.windowSizeWidth = self.winwidth
objOffset.halfSearchRangeDown = self.srchgt
objOffset.halfSearchRangeAcross = self.srcwidth
objOffset.masterStartPixelDownStatic = 50
objOffset.masterStartPixelAcrossStatic = 50
objOffset.skipSampleDown = self.skiphgt
objOffset.skipSampleAcross = self.skipwidth
objOffset.corrSufaceOverSamplingMethod = 0
objOffset.corrSurfaceOverSamplingFactor = self.oversample
# generic control
objOffset.numberWindowDownInChunk = 10
objOffset.numberWindowAcrossInChunk = 10
objOffset.mmapSize = 16
objOffset.setupParams()
objOffset.setConstantGrossOffset(self.azshift,self.rgshift)
# objOffset.numberThreads = 1
### Configure dense Ampcor object
print('\nMaster frame: %s' % (mf))
print('Slave frame: %s' % (sf))
print('Main window size width: %d' % (self.winwidth))
print('Main window size height: %d' % (self.winhgt))
print('Search window size width: %d' % (self.srcwidth))
print('Search window size height: %d' % (self.srchgt))
print('Skip sample across: %d' % (self.skipwidth))
print('Skip sample down: %d' % (self.skiphgt))
print('Field margin: %d' % (self.margin))
print('Oversampling factor: %d' % (self.oversample))
print('Gross offset across: %d' % (self.rgshift))
print('Gross offset down: %d\n' % (self.azshift))
#Modify BIL in filename to BIP if needed and store for future use
prefix, ext = os.path.splitext(self._insar.offsetfile)
if ext == '.bil':
ext = '.bip'
self._insar.offsetfile = prefix + ext
objOffset.offsetImageName = os.path.join(self._insar.mergedDirname, self._insar.offsetfile)
objOffset.snrImageName = os.path.join(self._insar.mergedDirname, self._insar.snrfile)
print('Output dense offsets file name: %s' % (objOffset.offsetImageName))
print('Output SNR file name: %s' % (objOffset.snrImageName))
print('\n======================================')
print('Running dense ampcor...')
print('======================================\n')
objOffset.checkPixelInImageRange()
objOffset.runAmpcor()
#objOffset.denseampcor(m, s) ### Where the magic happens...
### Store params for later
self._insar.offset_width = objOffset.numberWindowAcross
self._insar.offset_length = objOffset.numberWindowDown
self._insar.offset_top = 50
self._insar.offset_left = 50
outImg = isceobj.createImage()
outImg.setDataType('FLOAT')
outImg.setFilename(objOffset.offsetImageName.decode('utf-8'))
outImg.setBands(2)
outImg.scheme = 'BIP'
outImg.setWidth(objOffset.numberWindowAcross)
outImg.setLength(objOffset.numberWindowDown)
outImg.setAccessMode('read')
outImg.renderHdr()
snrImg = isceobj.createImage()
snrImg.setFilename( objOffset.snrImageName.decode('utf8'))
snrImg.setDataType('FLOAT')
snrImg.setBands(1)
snrImg.setWidth(objOffset.numberWindowAcross)
snrImg.setLength(objOffset.numberWindowDown)
snrImg.setAccessMode('read')
snrImg.renderHdr()
if __name__ == '__main__' :
'''
Default routine to plug master.slc.full/slave.slc.full into
Dense Offsets Ampcor module.
'''
main()
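# Editorial note (not in the original module): main() is not defined in this file, so running
# it as a standalone script would raise NameError. In practice runDenseOffsets(self) appears to
# be called by the TopsProc workflow, which supplies the `self` object carrying the
# configuration attributes used above (winwidth, winhgt, srcwidth, srchgt, skipwidth, skiphgt,
# oversample, rgshift, azshift).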
| 32.884354 | 95 | 0.659082 |
fb74739f731dcb7ff233ad8f97be7b324c8711ca | 1,940 | py | Python | test.py | GooooM/test2 | aa49d6e46e90bbaf95fe5e028a6b52c52799cc32 | ["MIT"] | 1 | 2019-08-02T08:01:41.000Z | 2019-08-02T08:01:41.000Z | test.py | GooooM/test2 | aa49d6e46e90bbaf95fe5e028a6b52c52799cc32 | ["MIT"] | null | null | null | test.py | GooooM/test2 | aa49d6e46e90bbaf95fe5e028a6b52c52799cc32 | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.fc1 = nn.Linear(in_features=28 * 28, out_features=64)
self.fc2 = nn.Linear(in_features=64, out_features=128)
self.fc3 = nn.Linear(in_features=128, out_features=256)
self.fc4 = nn.Linear(in_features=256, out_features=10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.5], std=[0.5])])
dataset = torchvision.datasets.MNIST(root='./datasets', train=True, transform=transform, download=True)
data_loader = torch.utils.data.DataLoader(dataset=dataset, num_workers=0, batch_size=32, shuffle=True)
mlp = MLP()
loss = nn.CrossEntropyLoss()
optim = torch.optim.Adam(mlp.parameters(), lr=2e-4, betas=(0.5, 0.99), eps=1e-8)
EPOCHS = 1
total_step = 0
list_loss = list()
list_acc = list()
for epoch in range(EPOCHS):
for i, data in enumerate(data_loader):
total_step = total_step + 1
input, label = data[0], data[1]  # unpack the image batch and its labels
# input shape: [batch_size, channel, height, width]
input = input.view(input.shape[0], -1)  # flatten to [batch_size, channel*height*width]
classification_result = mlp(input) # [batch size, 10]
l = loss(classification_result, label)
list_loss.append(l.detach().item())  # .item() converts the torch tensor to a plain Python number
optim.zero_grad()
l.backward()
optim.step()
print(l.detach().item())
plt.figure()
plt.plot(range(len(list_loss)), list_loss, linestyle='--')
plt.show()
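# Editorial addition (hedged sketch, not in the original script): list_acc above is created
# but never filled. The following evaluates the trained MLP on the MNIST test split using
# the same transform, and records the accuracy.
test_dataset = torchvision.datasets.MNIST(root='./datasets', train=False, transform=transform, download=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, num_workers=0, batch_size=32, shuffle=False)
mlp.eval()  # evaluation mode (no dropout/batchnorm here, but idiomatic)
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        images = images.view(images.shape[0], -1)      # flatten to [batch_size, 784]
        predicted = mlp(images).argmax(dim=1)          # index of the highest logit
        correct += (predicted == labels).sum().item()
        total += labels.shape[0]
list_acc.append(correct / total)
print('test accuracy: %.4f' % (correct / total))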
| 31.290323 | 104 | 0.62268 |
8f17d599fcc1fdc1a1833f245677a90aa0bfe785 | 26,782 | py | Python | test/test_metadata_processing.py | kevindurston21/YANOM-Note-O-Matic | c61845791bccfc043759eaa91e189d31d7276ae2 | ["MIT"] | 7 | 2021-03-01T18:32:26.000Z | 2022-02-05T22:45:33.000Z | test/test_metadata_processing.py | kevindurston21/YANOM-Note-O-Matic | c61845791bccfc043759eaa91e189d31d7276ae2 | ["MIT"] | 50 | 2021-02-28T17:36:49.000Z | 2022-03-08T20:09:04.000Z | test/test_metadata_processing.py | kevindurston21/YANOM-Note-O-Matic | c61845791bccfc043759eaa91e189d31d7276ae2 | ["MIT"] | 3 | 2021-06-17T23:55:23.000Z | 2021-08-09T10:29:54.000Z |
import unittest
import pytest
from src.conversion_settings import ConversionSettings
from src.metadata_processing import MetaDataProcessor
class TestMetaDataProcessor(unittest.TestCase):
def setUp(self) -> None:
self.conversion_settings = ConversionSettings()
self.conversion_settings.set_quick_setting('gfm')
self.metadata_processor = MetaDataProcessor(self.conversion_settings)
def test_remove_tag_spaces_if_required(self):
test_data_sets = [
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/SubTag1",
"Tag1/SubTag1/SubSubTag1",
"Tag2"]
},
False,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/SubTag1",
"Tag1/SubTag1/SubSubTag1",
"Tag2"]
},
'removing spaces failed when there were no spaces'
),
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
False,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub-Tag1",
"Tag1/Sub-Tag1/Sub-Sub-Tag1",
"Tag2"]
},
'removing spaces failed when there were spaces'
),
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
True,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
'removing spaces failed when NOT required'
),
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing {test_set}'):
self.metadata_processor._metadata = test_set[0]
self.metadata_processor._spaces_in_tags = test_set[1]
self.metadata_processor.remove_tag_spaces_if_required()
self.assertEqual(test_set[2], self.metadata_processor.metadata, test_set[3])
def test_split_tags_if_required(self):
test_data_sets = [
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/SubTag1",
"Tag1/SubTag1/SubSubTag1",
"Tag2"]
},
True,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"SubTag1",
"SubSubTag1",
"Tag2"]
},
'splitting tags with no spaces failed'
),
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
True,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Sub Tag1",
"Sub Sub Tag1",
"Tag2"]
},
'splitting tags with spaces failed'
),
({'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
False,
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]
},
'splitting tags failed when NOT required'
),
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing {test_set}'):
self.metadata_processor._metadata = test_set[0]
self.metadata_processor._split_tags = test_set[1]
self.metadata_processor.split_tags_if_required()
self.assertTrue(
sorted(test_set[2]['tags']) == sorted(self.metadata_processor.metadata['tags']),
test_set[3])
def test_parse_dict_metadata(self):
test_data_sets = [
(['title', 'ctime', 'mtime'],
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678'
},
{'title': 'My Title',
'ctime': '1234',
'mtime': '5678'
},
'generating selected metadata failed for clean data'
),
(['title', 'mtime'],
{'title': 'My Title',
'ctime': '1234',
'content': 'my_content'
},
{'title': 'My Title'},
'generating metadata with "content" in metadata'
),
(['title', 'tags', 'ctime', 'mtime'],
{'title': 'My Title',
'ctime': '1234'
},
{'title': 'My Title',
'ctime': '1234'
},
'generating selected metadata failed for meta data missing one of the schema keys'
),
([],
{'title': 'My Title',
'ctime': '1234'
},
{},
'generating selected metadata failed for meta data missing a schema tag'
),
(['title', 'tags', 'ctime', 'mtime'],
{},
{},
'generating selected metadata failed for empty metadata'
),
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing {test_set}'):
self.metadata_processor._metadata = {}
self.metadata_processor._metadata_schema = test_set[0]
self.metadata_processor.parse_dict_metadata(test_set[1])
self.assertTrue(test_set[2] == self.metadata_processor.metadata, test_set[3])
self.metadata_processor._split_tags = True
self.metadata_processor._spaces_in_tags = False
self.metadata_processor._metadata = {}
self.metadata_processor._metadata_schema = ['tags', 'content']
raw_metadata = {'tags': ["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]}
expected_result = {'tags': ["Tag1",
"Sub-Tag1",
"Sub-Sub-Tag1",
"Tag2"]}
self.metadata_processor.parse_dict_metadata(raw_metadata)
self.assertTrue(
sorted(expected_result['tags']) == sorted(self.metadata_processor.metadata['tags']),
'generating metadata with tags failed')
def test_add_metadata_html_to_content(self):
content = '<head><title>-</title></head>'
self.metadata_processor._metadata = {'title': 'My Title'}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<head><title>My Title</title><meta title="My Title"/></head>',
new_content,
'title and meta data inserted incorrectly')
content = """<!DOCTYPE html>
<html lang="" xml:lang="" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8"/>
<meta content="pandoc" name="generator"/>
<meta content="width=device-width, initial-scale=1.0, user-scalable=yes" name="viewport"/>
<title>-</title>
<style>
html {
line-height: 1.5;
font-family: Georgia, serif;
font-size: 20px;
color: #1a1a1a;
background-color: #fdfdfd;
}
</style>
</head></html>
"""
self.metadata_processor._metadata = {'test': 'test-meta-content', 'test2': 'this is test2'}
expected_result = """<!DOCTYPE html>
<html lang="" xml:lang="" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8"/>
<meta content="pandoc" name="generator"/>
<meta content="width=device-width, initial-scale=1.0, user-scalable=yes" name="viewport"/>
<title>-</title>
<style>
html {
line-height: 1.5;
font-family: Georgia, serif;
font-size: 20px;
color: #1a1a1a;
background-color: #fdfdfd;
}
</style>
<meta test="test-meta-content"/><meta test2="this is test2"/></head></html>
"""
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual(expected_result, new_content, 'meta data inserted incorrectly')
content = '<title>-</title>'
self.metadata_processor._metadata = {'test': 'test-meta-content', 'test2': 'this is test2'}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<title>-</title>',
new_content, 'meta data inserted incorrectly when there is no head')
content = '<head></head><h1>hello</h1>'
self.metadata_processor._metadata = {'test': 'test-meta-content', 'test2': 'this is test2'}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<head><meta test="test-meta-content"/><meta test2="this is test2"/></head><h1>hello</h1>',
new_content, 'meta data inserted incorrectly')
content = '<head></head><h1>hello</h1>'
self.metadata_processor._metadata = {}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<head></head><h1>hello</h1>', new_content, 'meta data inserted incorrectly')
content = '<title>-</title>'
self.metadata_processor._metadata = {'test': 'test-meta-content', 'test2': 'this is test2'}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<title>-</title>', new_content, 'meta data inserted incorrectly')
self.metadata_processor._conversion_settings.markdown_conversion_input = 'pandoc_markdown'
content = '<head><title>-</title></head>'
self.metadata_processor._metadata = {'test': 'test-meta-content', 'test2': 'this is test2'}
new_content = self.metadata_processor.add_metadata_html_to_content(content)
self.assertEqual('<head><title>-</title><meta test="test-meta-content"/><meta test2="this is test2"/></head>',
new_content, 'meta data inserted incorrectly')
def test_parse_html_meta_data(self):
test_data_sets = [
(['title', 'creation_time'],
'<head><meta title="this is test2"/><meta charset="utf8"/><meta content="my_content"/><meta creation_time="test-meta-content"/></head>',
{'title': 'this is test2', 'creation_time': 'test-meta-content'},
'meta data not parsed correctly'),
(['title', 'creation_time'],
'<meta title="this is test2"/><meta creation_time="test-meta-content"/>',
{},
'meta data not ignored if no head section'),
(['title', 'creation_time'],
'<head><meta title="this is test2"/><meta not_valid="not_in_schema"/></head>',
{'title': 'this is test2'},
'meta data not parsed correctly when meta not in schema present'),
([],
'<head><meta title="this is test2"/><meta not_valid="not_in_schema"/></head>',
{},
'meta data not parsed correctly when there is no schema')
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing parsing of html for meta tags {test_set}'):
self.metadata_processor._conversion_settings.metadata_schema = ['title', 'ctime', 'mtime', 'tag', 'content']
self.metadata_processor._metadata = {}
self.metadata_processor._metadata_schema = test_set[0]
self.metadata_processor.parse_html_metadata(test_set[1])
self.assertEqual(test_set[2], self.metadata_processor.metadata, test_set[3])
def test_format_tag_metadata_if_required(self):
self.metadata_processor._split_tags = True
self.metadata_processor._spaces_in_tags = False
self.metadata_processor._metadata = {'tags': ["Tag1",
"Tag1/Sub Tag1",
"Tag1/Sub Tag1/Sub Sub Tag1",
"Tag2"]}
self.metadata_processor.format_tag_metadata_if_required()
self.assertEqual(sorted(["Tag1", "Sub-Tag1", "Sub-Sub-Tag1", "Tag2"]),
sorted(self.metadata_processor.metadata['tags']),
'formatting tags if required failed')
def test_parse_md_metadata(self):
test_data_sets = [
('Hello',
['title', 'tag', 'ctime', 'mtime'],
'Hello',
'no meta data, content was incorrect',
{},
'no meta data to parse, resulted in having metadata'
),
('---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
['title', 'tag', 'ctime', 'mtime'],
'Hello',
'with md metadata, content was incorrect',
{'title': 'Hello, world!'},
'with md metadata to parse, incorrect metadata'
),
('---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
[''],
'Hello',
'with md metadata and empty schema, content was incorrect',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'with md metadata and empty schema, incorrect metadata'
),
('---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\ncontent: my content\n---\n\nHello',
['title', 'layout', 'ctime', 'mtime', 'content'],
'Hello',
'with md metadata, content was incorrect',
{'title': 'Hello, world!', 'layout': 'post', 'content': 'my content'},
'with md metadata to parse, incorrect metadata'
),
('---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
['ctime', 'mtime'],
'Hello',
'with md metadata and no valid matches in schema, content was incorrect',
{},
'with md metadata and no valid matches in schema, incorrect metadata'
),
('---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
[],
'Hello',
'with md metadata and empty schema, content was incorrect',
{},
'with md metadata and empty schema, incorrect metadata'
),
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing parsing meta data from MD {test_set}'):
md_string = test_set[0]
self.metadata_processor._metadata = {}
self.metadata_processor._metadata_schema = test_set[1]
new_content = self.metadata_processor.parse_md_metadata(md_string)
self.assertEqual(test_set[2], new_content, test_set[3])
self.assertTrue(test_set[4] == self.metadata_processor.metadata, test_set[5])
def test_add_metadata_md_to_content(self):
test_data_sets = [
('Hello',
{},
'Hello',
'no meta data, content was incorrect'
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
'good meta string and content failed'
)
]
for test_set in test_data_sets:
with self.subTest(msg=f'Testing adding meta data to MD {test_set}'):
content = test_set[0]
self.metadata_processor._metadata = test_set[1]
new_content = self.metadata_processor.add_metadata_md_to_content(content)
self.assertEqual(test_set[2], new_content, test_set[3])
@pytest.mark.parametrize(
'md_string, markdown_conversion_input, expected', [
(
"---\nctime: '202102122352'\nmtime: '202104242208'\ntag:\n- Tag1\n- Tag1/SubTag1\n- Tag1/SubTag1/SubSubTag1\n- Tag2\ntitle: test page\n---\n\n# This is H1",
'obsidian',
'ctime: 202102122352\nmtime: 202104242208\ntag: #Tag1, #Tag1/SubTag1, #Tag1/SubTag1/SubSubTag1, #Tag2title: test page\n\nhello'),
("---\nctime: '202102122352'\nmtime: '202104242208'\ntag: \ntitle: test page\n---\n\n# This is H1",
'obsidian',
'ctime: 202102122352\nmtime: 202104242208\ntitle: test page\n\nhello'),
("# This is H1",
'obsidian',
"hello"),
(
"---\nctime: '202102122352'\nmtime: '202104242208'\ntag:\n- Tag1\n- Tag1/SubTag1\n- Tag1/SubTag1/SubSubTag1\n- Tag2\ntitle: test page\n---\n\n# This is H1",
'pandoc_markdown_strict',
'ctime: 202102122352\nmtime: 202104242208\ntag: #Tag1, #Tag1/SubTag1, #Tag1/SubTag1/SubSubTag1, #Tag2title: test page\n\nhello'),
]
)
def test_add_text_metadata_to_content(md_string, markdown_conversion_input, expected):
conversion_settings = ConversionSettings()
conversion_settings.markdown_conversion_input = markdown_conversion_input
conversion_settings.metadata_schema = ['title', 'ctime', 'mtime', 'tag']
metadata_processor = MetaDataProcessor(conversion_settings)
metadata_processor.parse_md_metadata(md_string)
content = "hello"
result = metadata_processor.add_text_metadata_to_content(content)
assert result == expected
@pytest.mark.parametrize(
'md_string, expected', [
(
"---\nctime: '202102122352'\nmtime: '202104242208'\ntag:\n- Tag1\n- Tag1/SubTag1\n- Tag1/SubTag1/SubSubTag1\n- Tag2\ntitle: test page\n---\n\n# This is H1",
{'ctime': '202102122352', 'mtime': '202104242208',
'tag': ['#Tag1', '#Tag1/SubTag1', '#Tag1/SubTag1/SubSubTag1', '#Tag2'], 'title': 'test page'}),
(
"---\nctime: '202102122352'\nmtime: '202104242208'\ntags:\n- Tag1\n- Tag1/SubTag1\n- Tag1/SubTag1/SubSubTag1\n- Tag2\ntitle: test page\n---\n\n# This is H1",
{'ctime': '202102122352', 'mtime': '202104242208',
'tags': ['#Tag1', '#Tag1/SubTag1', '#Tag1/SubTag1/SubSubTag1', '#Tag2'], 'title': 'test page'}),
]
)
def test_add_tag_prefix_if_required(md_string, expected):
conversion_settings = ConversionSettings()
conversion_settings.tag_prefix = '#'
conversion_settings.metadata_schema = ['']
metadata_processor = MetaDataProcessor(conversion_settings)
# md_string = "---\nctime: '202102122352'\nmtime: '202104242208'\ntag:\n- Tag1\n- Tag1/SubTag1\n- Tag1/SubTag1/SubSubTag1\n- Tag2\ntitle: test page\n---\n\n# This is H1"
# expected = {'ctime': '202102122352', 'mtime': '202104242208', 'tag': ['#Tag1', '#Tag1/SubTag1', '#Tag1/SubTag1/SubSubTag1', '#Tag2'], 'title': 'test page'}
metadata_processor.parse_md_metadata(md_string)
metadata_processor.add_tag_prefix_if_required()
assert metadata_processor.metadata == expected
@pytest.mark.parametrize(
'html_string, expected, schema', [
(
'<head><title>test page</title><meta title="test page"/><meta ctime="202102122352"/><meta mtime="202104242208"/><meta tag="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head><h1>This is H1</h1',
{'ctime': '202102122352', 'mtime': '202104242208',
'tag': ['Tag1', 'Tag1/SubTag1', 'Tag1/SubTag1/SubSubTag1', 'Tag2'], 'title': 'test page'},
['title', 'ctime', 'mtime', 'tag']
),
(
'<head><title>test page</title><meta title="test page"/><meta ctime="202102122352"/><meta mtime="202104242208"/><meta tags="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head><h1>This is H1</h1',
{'ctime': '202102122352', 'mtime': '202104242208',
'tags': ['Tag1', 'Tag1/SubTag1', 'Tag1/SubTag1/SubSubTag1', 'Tag2'], 'title': 'test page'},
['title', 'ctime', 'mtime', 'tags']),
(
'<head><title>test page</title><meta title="test page"/><meta ctime="202102122352"/><meta mtime="202104242208"/><meta tags="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head><h1>This is H1</h1',
{'ctime': '202102122352', 'mtime': '202104242208',
'tags': ['Tag1', 'Tag1/SubTag1', 'Tag1/SubTag1/SubSubTag1', 'Tag2'], 'title': 'test page'},
[''])
]
)
def test_convert_tag_sting_to_tag_list(html_string, expected, schema):
conversion_settings = ConversionSettings()
conversion_settings.metadata_schema = schema
metadata_processor = MetaDataProcessor(conversion_settings)
metadata_processor.parse_html_metadata(html_string)
metadata_processor.convert_tag_sting_to_tag_list()
assert metadata_processor.metadata == expected
@pytest.mark.parametrize(
'html_string, expected', [
(
'<head><title>test page</title><meta title="test page"/><meta ctime="202102122352"/><meta mtime="202104242208"/><meta tag="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head><h1>This is H1</h1',
['SubSubTag1', 'SubTag1', 'Tag1', 'Tag2']
),
(
'<head><title>test page</title><meta title="test page"/><meta ctime="202102122352"/><meta mtime="202104242208"/><meta tags="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head><h1>This is H1</h1',
['SubSubTag1', 'SubTag1', 'Tag1', 'Tag2']
),
]
)
def test_split_tags_if_required_with_tags_key(html_string, expected):
conversion_settings = ConversionSettings()
conversion_settings.split_tags = True
conversion_settings.metadata_schema = ['']
metadata_processor = MetaDataProcessor(conversion_settings)
metadata_processor.parse_html_metadata(html_string)
metadata_processor.convert_tag_sting_to_tag_list()
if 'tags' in metadata_processor.metadata:
assert sorted(metadata_processor.metadata['tags']) == expected
if 'tag' in metadata_processor.metadata:
assert sorted(metadata_processor.metadata['tag']) == expected
@pytest.mark.parametrize(
'content, metadata, front_matter_format, expected', [
('Hello',
{},
'yaml',
'Hello',
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'yaml',
'---\nexcerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n---\n\nHello',
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'toml',
'+++\nexcerpt = "tl;dr"\nlayout = "post"\ntitle = "Hello, world!"\n\n+++\n\nHello',
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'json',
'{\n "excerpt": "tl;dr",\n "layout": "post",\n "title": "Hello, world!"\n}\n\n\nHello',
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'text',
'excerpt: tl;dr\nlayout: post\ntitle: Hello, world!\n\nHello',
),
('Hello',
{'excerpt': 'tl;dr', 'layout': 'post', 'title': 'Hello, world!'},
'none',
'Hello',
),
]
)
def test_add_metadata_md_to_content(content, metadata, front_matter_format, expected):
conversion_settings = ConversionSettings()
conversion_settings.front_matter_format = front_matter_format
metadata_processor = MetaDataProcessor(conversion_settings)
metadata_processor._metadata = metadata
result = metadata_processor.add_metadata_md_to_content(content)
assert result == expected
def test_add_metadata_html_to_content():
conversion_settings = ConversionSettings()
conversion_settings.front_matter_format = 'yaml'
metadata_processor = MetaDataProcessor(conversion_settings)
metadata_processor._metadata = {'title': 'My Title',
'ctime': '1234',
'mtime': '5678',
'tags':
["Tag1",
"Tag1/SubTag1",
"Tag1/SubTag1/SubSubTag1",
"Tag2"]
}
content = """<!DOCTYPE html>
<html lang="" xml:lang="" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8"/>
<meta content="pandoc" name="generator"/>
<meta content="width=device-width, initial-scale=1.0, user-scalable=yes" name="viewport"/>
<title>-</title>
<style>
html {
line-height: 1.5;
font-family: Georgia, serif;
font-size: 20px;
color: #1a1a1a;
background-color: #fdfdfd;
}
</style>
</head></html>
"""
expected = """<!DOCTYPE html>
<html lang="" xml:lang="" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8"/>
<meta content="pandoc" name="generator"/>
<meta content="width=device-width, initial-scale=1.0, user-scalable=yes" name="viewport"/>
<title>My Title</title>
<style>
html {
line-height: 1.5;
font-family: Georgia, serif;
font-size: 20px;
color: #1a1a1a;
background-color: #fdfdfd;
}
</style>
<meta title="My Title"/><meta ctime="1234"/><meta mtime="5678"/><meta tags="Tag1,Tag1/SubTag1,Tag1/SubTag1/SubSubTag1,Tag2"/></head></html>
"""
result = metadata_processor.add_metadata_html_to_content(content)
assert result == expected
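# Editorial addition (hedged, not part of the original module): a minimal entry point so the
# unittest-style TestMetaDataProcessor class can be run directly. The pytest-parametrized
# functions above still need pytest, e.g. `python -m pytest test/test_metadata_processing.py -q`.
if __name__ == '__main__':
    unittest.main()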
| 41.013783 | 208 | 0.546412 |
d921c25f412ab97856b6acca98fb1c0eec05182c | 198 | py | Python | test/suite/E40.py | shardros/autopep8 | 2ab2ea74668b10f3910f3d5b9526494fa5671ca1 | ["MIT"] | 3,459 | 2015-01-03T15:53:43.000Z | 2022-03-31T16:33:01.000Z | test/suite/E40.py | hayata-yamamoto/autopep8 | 107e29dce22c7b367a36633a78735278e4ad4288 | ["MIT"] | 435 | 2015-01-03T12:58:44.000Z | 2022-03-29T12:37:13.000Z | test/suite/E40.py | hayata-yamamoto/autopep8 | 107e29dce22c7b367a36633a78735278e4ad4288 | ["MIT"] | 279 | 2015-03-16T16:34:51.000Z | 2022-03-26T23:58:48.000Z |
#: E401
import os, sys
#: Okay
import os
import sys
from subprocess import Popen, PIPE
from myclass import MyClass
from foo.bar.yourclass import YourClass
import myclass
import foo.bar.yourclass
| 14.142857 | 39 | 0.792929 |
1bd89a44d0a79cede325cced9b773226e4b312c7 | 22,805 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/operations/_virtual_network_peerings_operations.py | praveenkuttappan/azure-sdk-for-python | 4b79413667b7539750a6c7dde15737013a3d4bd5 | ["MIT"] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/operations/_virtual_network_peerings_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/operations/_virtual_network_peerings_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations(object):
"""VirtualNetworkPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkPeering"
"""Gets the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
virtual_network_peering_name, # type: str
virtual_network_peering_parameters, # type: "_models.VirtualNetworkPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkPeering"]
"""Creates or updates a peering in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the peering.
:type virtual_network_peering_name: str
:param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
network peering operation.
:type virtual_network_peering_parameters: ~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
virtual_network_peering_parameters=virtual_network_peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkPeeringListResult"]
"""Gets all virtual network peerings in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_10_01.models.VirtualNetworkPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'} # type: ignore
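    # Hedged usage sketch (not part of the generated SDK): a caller might drive the two
    # operations above roughly as follows, assuming `network_client` is an instantiated
    # NetworkManagementClient for this api version and `peering_params` is a
    # VirtualNetworkPeering model instance (both names are assumptions for illustration):
    #
    #     poller = network_client.virtual_network_peerings.begin_create_or_update(
    #         resource_group_name="my-rg",
    #         virtual_network_name="my-vnet",
    #         virtual_network_peering_name="my-peering",
    #         virtual_network_peering_parameters=peering_params,
    #     )
    #     peering = poller.result()  # blocks until the long-running operation completes
    #     for item in network_client.virtual_network_peerings.list("my-rg", "my-vnet"):
    #         print(item.name)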
| 51.829545
| 250
| 0.680246
|
37015e4976ee1e12e6941d729dff108993fe558e
| 3,292
|
py
|
Python
|
galaxylearning/core/strategy.py
|
ZJU-DistributedAI/GalaxyLearning
|
b56b7f6d74a93a06f439ffb8c206b3152b916e92
|
[
"Apache-2.0"
] | 4
|
2020-03-16T14:02:16.000Z
|
2021-10-03T10:24:29.000Z
|
galaxylearning/core/strategy.py
|
ZJU-DistributedAI/GalaxyLearning
|
b56b7f6d74a93a06f439ffb8c206b3152b916e92
|
[
"Apache-2.0"
] | null | null | null |
galaxylearning/core/strategy.py
|
ZJU-DistributedAI/GalaxyLearning
|
b56b7f6d74a93a06f439ffb8c206b3152b916e92
|
[
"Apache-2.0"
] | 3
|
2020-04-10T10:52:42.000Z
|
2021-11-23T04:01:43.000Z
|
# federate strategies
from enum import Enum
import galaxylearning.exceptions.fl_expection as exceptions
class WorkModeStrategy(Enum):
WORKMODE_STANDALONE = "standalone"
WORKMODE_CLUSTER = "cluster"
class FederateStrategy(Enum):
FED_AVG = "fed_avg"
FED_DISTILLATION = "fed_distillation"
class RunTimeStrategy(Enum):
L1_LOSS = "L1loss"
MSE_LOSS = "MSELoss"
CROSSENTROPY_LOSS = "CrossEntropyLoss"
NLL_LOSS = "NLLLoss"
POISSIONNLL_LOSS = "PoissonNLLLoss"
KLDIV_LOSS = "KLDivLoss"
BCE_LOSS = "BCELoss"
BCEWITHLOGITS_Loss = "BCEWithLogitsLoss"
MARGINRANKING_Loss = "MarginRankingalaxylearningoss"
OPTIM_SGD = "SGD"
OPTIM_ADAM = "Adam"
class StrategyFactory(object):
def __init__(self):
pass
class TrainStrategyFatorcy(StrategyFactory):
def __init__(self, optimizer, learning_rate, loss_function, batch_size, epoch):
        super(TrainStrategyFatorcy, self).__init__()
self.optimizer = optimizer
self.learning_rate = learning_rate
self.loss_function = loss_function
self.batch_size = batch_size
self.epoch = epoch
def get_loss_functions(self):
loss_functions = [RunTimeStrategy.L1_LOSS, RunTimeStrategy.MSE_LOSS, RunTimeStrategy.CROSSENTROPY_LOSS,
RunTimeStrategy.NLL_LOSS, RunTimeStrategy.POISSIONNLL_LOSS,
RunTimeStrategy.KLDIV_LOSS, RunTimeStrategy.BCE_LOSS, RunTimeStrategy.BCEWITHLOGITS_Loss,
RunTimeStrategy.MARGINRANKING_Loss]
return loss_functions
def get_fed_strategies(self):
fed_strategies = [FederateStrategy.FED_AVG, FederateStrategy.FED_DISTILLATION]
return fed_strategies
def get_optim_strategies(self):
optim_strategies = [RunTimeStrategy.OPTIM_SGD, RunTimeStrategy.OPTIM_ADAM]
return optim_strategies
def set_optimizer(self, optimizer):
optim_strategies = self.get_optim_strategies()
if optimizer in optim_strategies:
self.optimizer = optimizer.value
else:
raise exceptions.GLException("optimizer strategy not found")
def get_optimizer(self):
return self.optimizer
def set_learning_rate(self, learning_rate):
self.learning_rate = learning_rate
def get_learning_rate(self):
return self.learning_rate
def set_loss_function(self, loss_function):
loss_functions = self.get_loss_functions()
if loss_function in loss_functions:
self.loss_function = loss_function.value
else:
raise exceptions.GLException("loss strategy not found")
def get_loss_function(self):
return self.loss_function
def set_batch_size(self, batch_size):
self.batch_size = batch_size
def get_batch_size(self):
return self.batch_size
def set_epoch(self, epoch):
self.epoch = epoch
def get_epoch(self):
return self.epoch
# def set_aggregate_strategy(self, aggregate_strategy):
# self.aggregate_strategy = aggregate_strategy
#
# def get_aggregate_strategy(self):
# return self.aggregate_strategy
class TestStrategyFactory(StrategyFactory):
def __init__(self):
super(TestStrategyFactory, self).__init__()
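# Illustrative usage sketch (not part of the original module), assuming the galaxylearning
# package imports above resolve; names such as `train_strategy` are made up for the example.
if __name__ == "__main__":
    train_strategy = TrainStrategyFatorcy(optimizer=RunTimeStrategy.OPTIM_SGD,
                                          learning_rate=0.01,
                                          loss_function=RunTimeStrategy.CROSSENTROPY_LOSS,
                                          batch_size=32,
                                          epoch=5)
    train_strategy.set_optimizer(RunTimeStrategy.OPTIM_ADAM)       # stores the string "Adam"
    train_strategy.set_loss_function(RunTimeStrategy.MSE_LOSS)     # stores the string "MSELoss"
    print(train_strategy.get_optimizer(), train_strategy.get_loss_function())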
| 30.201835
| 115
| 0.701397
|
c1274d85d3451da814afc40fedba398f83895daa
| 1,258
|
py
|
Python
|
_unittests/ut_documentation/test_LONG_run_notebooks_pydata_2018_onnx.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | null | null | null |
_unittests/ut_documentation/test_LONG_run_notebooks_pydata_2018_onnx.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | 16
|
2016-11-13T19:52:35.000Z
|
2021-12-29T10:59:41.000Z
|
_unittests/ut_documentation/test_LONG_run_notebooks_pydata_2018_onnx.py
|
sdpython/jupytalk
|
34abdf128de24becb21a9f08f243c3a74dadbfd9
|
[
"MIT"
] | 4
|
2016-09-10T10:44:50.000Z
|
2021-09-22T16:28:56.000Z
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=13s)
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
from pyquickhelper.pycode import add_missing_development_version
import jupytalk
class TestLONGFunctionTestNotebook2018PyDataONNX(unittest.TestCase):
def setUp(self):
add_missing_development_version(
["jyquickhelper", "pymyinstall"], __file__, hide=True)
def test_notebook_pydata_onnx(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
import pymyinstall
self.assertTrue(jupytalk is not None)
folder = os.path.join(os.path.dirname(__file__),
"..", "..", "_doc", "notebooks", "2018", "pyparis")
files = [_ for _ in os.listdir(
folder) if '.png' in _ or '.csv' in _ or '.jpg' in _]
test_notebook_execution_coverage(__file__, "pyparis", folder,
this_module_name="jupytalk", fLOG=fLOG,
copy_files=files, modules=[pymyinstall])
if __name__ == "__main__":
unittest.main()
| 31.45
| 81
| 0.621622
|
0d8d08e1882432a3d3425628da72a434833020d3
| 1,722
|
py
|
Python
|
scripts/provision-user.py
|
ideation-ai/fhir-works-on-aws-deployment
|
cafbad249618b61eb06a14b137c0bf4fb75fcc29
|
[
"Apache-2.0"
] | 2
|
2021-05-06T21:24:19.000Z
|
2021-05-27T17:16:09.000Z
|
scripts/provision-user.py
|
ideation-ai/fhir-works-on-aws-deployment
|
cafbad249618b61eb06a14b137c0bf4fb75fcc29
|
[
"Apache-2.0"
] | 6
|
2021-04-27T11:09:33.000Z
|
2021-09-02T13:13:42.000Z
|
scripts/provision-user.py
|
ideation-ai/fhir-works-on-aws-deployment
|
cafbad249618b61eb06a14b137c0bf4fb75fcc29
|
[
"Apache-2.0"
] | 3
|
2020-12-18T17:02:18.000Z
|
2020-12-29T02:45:22.000Z
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import boto3
import sys
'''
example run:
python3 provision-user.py <UserPoolId> <ClientId> <Region>
python3 provision-user.py us-west-2_yk8jbgpWM 12pgvi3gsl32qp9h8lg130arr0 us-west-2
'''
client = boto3.client('cognito-idp', region_name=sys.argv[3])
response = client.admin_create_user(
UserPoolId=sys.argv[1],
Username='workshopuser',
UserAttributes=[
{
'Name': 'email',
'Value': 'dummy@email.com'
},
{
'Name': 'email_verified',
'Value': 'True'
}
],
ValidationData=[
{
'Name': 'email',
'Value': 'dummy@email.com'
}
],
TemporaryPassword='Master123!',
MessageAction='SUPPRESS'
)
response = client.initiate_auth(
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
'USERNAME': 'workshopuser',
'PASSWORD': 'Master123!'
},
ClientId=sys.argv[2]
)
sessionid = response['Session']
response = client.respond_to_auth_challenge(
ClientId=sys.argv[2],
ChallengeName='NEW_PASSWORD_REQUIRED',
Session=sessionid,
ChallengeResponses={
'USERNAME': 'workshopuser',
'NEW_PASSWORD': 'Master123!'
}
)
response = client.admin_add_user_to_group(
UserPoolId=sys.argv[1],
Username='workshopuser',
GroupName='practitioner'
)
response = client.initiate_auth(
AuthFlow='USER_PASSWORD_AUTH',
AuthParameters={
'USERNAME': 'workshopuser',
'PASSWORD': 'Master123!'
},
ClientId=sys.argv[2]
)
sessionid = response['AuthenticationResult']['AccessToken']
print(sessionid)
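# Illustrative note (not part of the original script): the printed value is a Cognito access
# token; a caller would typically send it to the API as an "Authorization: Bearer <token>"
# header. The exact header expected by the deployment is an assumption here.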
| 21.259259
| 82
| 0.631243
|
aa280bac3ed9074681898fafc29ec222bec44b85
| 10,360
|
py
|
Python
|
layers.py
|
qwoprocks/SAPD
|
2aaedab7d7f16c504be4848b233b01eac7fcdda8
|
[
"Apache-2.0"
] | 104
|
2020-03-05T17:41:29.000Z
|
2022-03-09T05:48:01.000Z
|
layers.py
|
qwoprocks/SAPD
|
2aaedab7d7f16c504be4848b233b01eac7fcdda8
|
[
"Apache-2.0"
] | 20
|
2020-03-08T10:59:40.000Z
|
2022-03-12T00:19:04.000Z
|
layers.py
|
qwoprocks/SAPD
|
2aaedab7d7f16c504be4848b233b01eac7fcdda8
|
[
"Apache-2.0"
] | 15
|
2020-05-03T02:28:14.000Z
|
2021-07-26T02:01:52.000Z
|
# import keras
from tensorflow import keras
import tensorflow as tf
class BatchNormalization(keras.layers.BatchNormalization):
"""
Identical to keras.layers.BatchNormalization, but adds the option to freeze parameters.
"""
def __init__(self, freeze, *args, **kwargs):
self.freeze = freeze
super(BatchNormalization, self).__init__(*args, **kwargs)
# set to non-trainable if freeze is true
self.trainable = not self.freeze
def call(self, inputs, training=None, **kwargs):
# return super.call, but set training
if not training:
return super(BatchNormalization, self).call(inputs, training=False)
else:
return super(BatchNormalization, self).call(inputs, training=(not self.freeze))
def get_config(self):
config = super(BatchNormalization, self).get_config()
config.update({'freeze': self.freeze})
return config
class ClipBoxes(keras.layers.Layer):
"""
Keras layer to clip box values to lie inside a given shape.
"""
def call(self, inputs, **kwargs):
image, boxes = inputs
shape = keras.backend.cast(keras.backend.shape(image), keras.backend.floatx())
height = shape[1]
width = shape[2]
x1 = tf.clip_by_value(boxes[:, :, 0], 0, width - 1)
y1 = tf.clip_by_value(boxes[:, :, 1], 0, height - 1)
x2 = tf.clip_by_value(boxes[:, :, 2], 0, width - 1)
y2 = tf.clip_by_value(boxes[:, :, 3], 0, height - 1)
return keras.backend.stack([x1, y1, x2, y2], axis=2)
def compute_output_shape(self, input_shape):
return input_shape[1]
def filter_detections(
boxes,
classification,
class_specific_filter=True,
nms=True,
score_threshold=0.01,
max_detections=300,
nms_threshold=0.5,
):
"""
Filter detections using the boxes and classification values.
Args
boxes: Tensor of shape (num_boxes, 4) containing the boxes in (x1, y1, x2, y2) format.
classification: Tensor of shape (num_boxes, num_classes) containing the classification scores.
class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
nms: Flag to enable/disable non maximum suppression.
score_threshold: Threshold used to prefilter the boxes with.
max_detections: Maximum number of detections to keep.
nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
Returns
        A list of [boxes, scores, labels].
boxes is shaped (max_detections, 4) and contains the (x1, y1, x2, y2) of the non-suppressed boxes.
scores is shaped (max_detections,) and contains the scores of the predicted class.
labels is shaped (max_detections,) and contains the predicted label.
        In case there are fewer than max_detections detections, the tensors are padded with -1's.
"""
def _filter_detections(scores_, labels_):
# threshold based on score
# (num_score_keeps, 1)
indices_ = tf.where(keras.backend.greater(scores_, score_threshold))
if nms:
# (num_score_keeps, 4)
filtered_boxes = tf.gather_nd(boxes, indices_)
# In [4]: scores = np.array([0.1, 0.5, 0.4, 0.2, 0.7, 0.2])
# In [5]: tf.greater(scores, 0.4)
# Out[5]: <tf.Tensor: id=2, shape=(6,), dtype=bool, numpy=array([False, True, False, False, True, False])>
# In [6]: tf.where(tf.greater(scores, 0.4))
# Out[6]:
# <tf.Tensor: id=7, shape=(2, 1), dtype=int64, numpy=
# array([[1],
# [4]])>
#
# In [7]: tf.gather(scores, tf.where(tf.greater(scores, 0.4)))
# Out[7]:
# <tf.Tensor: id=15, shape=(2, 1), dtype=float64, numpy=
# array([[0.5],
# [0.7]])>
filtered_scores = keras.backend.gather(scores_, indices_)[:, 0]
# perform NMS
nms_indices = tf.image.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections,
iou_threshold=nms_threshold)
# filter indices based on NMS
# (num_score_nms_keeps, 1)
indices_ = keras.backend.gather(indices_, nms_indices)
# add indices to list of all indices
# (num_score_nms_keeps, )
labels_ = tf.gather_nd(labels_, indices_)
# (num_score_nms_keeps, 2)
indices_ = keras.backend.stack([indices_[:, 0], labels_], axis=1)
return indices_
if class_specific_filter:
all_indices = []
# perform per class filtering
for c in range(int(classification.shape[1])):
scores = classification[:, c]
labels = c * tf.ones((keras.backend.shape(scores)[0],), dtype='int64')
all_indices.append(_filter_detections(scores, labels))
# concatenate indices to single tensor
# (concatenated_num_score_nms_keeps, 2)
indices = keras.backend.concatenate(all_indices, axis=0)
else:
scores = keras.backend.max(classification, axis=1)
labels = keras.backend.argmax(classification, axis=1)
indices = _filter_detections(scores, labels)
# select top k
scores = tf.gather_nd(classification, indices)
labels = indices[:, 1]
scores, top_indices = tf.nn.top_k(scores, k=keras.backend.minimum(max_detections, keras.backend.shape(scores)[0]))
# filter input using the final set of indices
indices = keras.backend.gather(indices[:, 0], top_indices)
boxes = keras.backend.gather(boxes, indices)
labels = keras.backend.gather(labels, top_indices)
# zero pad the outputs
pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])
boxes = tf.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)
scores = tf.pad(scores, [[0, pad_size]], constant_values=-1)
labels = tf.pad(labels, [[0, pad_size]], constant_values=-1)
labels = keras.backend.cast(labels, 'int32')
# set shapes, since we know what they are
boxes.set_shape([max_detections, 4])
scores.set_shape([max_detections])
labels.set_shape([max_detections])
return [boxes, scores, labels]
class FilterDetections(keras.layers.Layer):
"""
Keras layer for filtering detections using score threshold and NMS.
"""
def __init__(
self,
nms=True,
class_specific_filter=True,
nms_threshold=0.5,
score_threshold=0.01,
max_detections=300,
parallel_iterations=32,
**kwargs
):
"""
Filters detections using score threshold, NMS and selecting the top-k detections.
Args
nms: Flag to enable/disable NMS.
class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
score_threshold: Threshold used to prefilter the boxes with.
max_detections: Maximum number of detections to keep.
parallel_iterations: Number of batch items to process in parallel.
"""
self.nms = nms
self.class_specific_filter = class_specific_filter
self.nms_threshold = nms_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.parallel_iterations = parallel_iterations
super(FilterDetections, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
"""
Constructs the NMS graph.
Args
            inputs : List of [boxes, classification] tensors.
"""
boxes = inputs[0]
classification = inputs[1]
# wrap nms with our parameters
def _filter_detections(args):
boxes_ = args[0]
classification_ = args[1]
return filter_detections(
boxes_,
classification_,
nms=self.nms,
class_specific_filter=self.class_specific_filter,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
nms_threshold=self.nms_threshold,
)
outputs = tf.map_fn(
_filter_detections,
elems=[boxes, classification],
dtype=['float32', 'float32', 'int32'],
parallel_iterations=self.parallel_iterations
)
return outputs
def compute_output_shape(self, input_shape):
"""
Computes the output shapes given the input shapes.
Args
input_shape : List of input shapes [boxes, classification].
Returns
List of tuples representing the output shapes:
            [filtered_boxes.shape, filtered_scores.shape, filtered_labels.shape]
"""
return [
(input_shape[0][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections),
]
def compute_mask(self, inputs, mask=None):
"""
This is required in Keras when there is more than 1 output.
"""
return (len(inputs) + 1) * [None]
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(FilterDetections, self).get_config()
config.update({
'nms': self.nms,
'class_specific_filter': self.class_specific_filter,
'nms_threshold': self.nms_threshold,
'score_threshold': self.score_threshold,
'max_detections': self.max_detections,
'parallel_iterations': self.parallel_iterations,
})
return config
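# Minimal smoke-test sketch (not part of the original file), assuming TF 2.x eager execution.
# It exercises filter_detections() on three toy boxes with two classes; the expected values in
# the trailing comment are illustrative.
if __name__ == "__main__":
    demo_boxes = tf.constant([[0., 0., 10., 10.],
                              [1., 1., 11., 11.],
                              [50., 50., 60., 60.]], dtype=tf.float32)
    demo_scores = tf.constant([[0.90, 0.10],
                               [0.80, 0.20],
                               [0.05, 0.60]], dtype=tf.float32)
    out_boxes, out_scores, out_labels = filter_detections(
        demo_boxes, demo_scores, score_threshold=0.3, max_detections=5)
    print(out_scores.numpy(), out_labels.numpy())
    # -> approximately [0.9, 0.6, -1, -1, -1] and [0, 1, -1, -1, -1]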
| 37.948718
| 135
| 0.616795
|
cea434fc52b28cd56874ad297890931870f42cc3
| 4,699
|
py
|
Python
|
koans/python_koans-master/python_koans-master/python3/koans/about_proxy_object_project.py
|
Readarmon0/Code_Challenges
|
37c3d23a60f43f3fd5214dfe2fb1848a646feb0d
|
[
"MIT"
] | 1
|
2016-11-12T04:30:13.000Z
|
2016-11-12T04:30:13.000Z
|
koans/python_koans-master/python_koans-master/python3/koans/about_proxy_object_project.py
|
Readarmon0/Code_Challenges
|
37c3d23a60f43f3fd5214dfe2fb1848a646feb0d
|
[
"MIT"
] | null | null | null |
koans/python_koans-master/python_koans-master/python3/koans/about_proxy_object_project.py
|
Readarmon0/Code_Challenges
|
37c3d23a60f43f3fd5214dfe2fb1848a646feb0d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy:
def __init__(self, target_object):
self._messages = []
self._obj = target_object
def __getattr__(self, attr_name):
self._messages.append(attr_name)
return self._obj.__getattribute__(attr_name)
def __setattr__(self, attr_name, value):
self_attrs = ['_messages', '_obj']
if attr_name in self_attrs:
object.__setattr__(self, attr_name, value)
else:
self._messages.append(attr_name)
self._obj.__setattr__(attr_name, value)
def messages(self):
return self._messages
def was_called(self, attr_name):
return attr_name in self._messages
def number_of_times_called(self, attr_name):
return len([x for x in self._messages if x == attr_name])
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
def test_proxy_method_returns_wrapped_object(self):
# NOTE: The Television class is defined below
tv = Proxy(Television())
self.assertTrue(isinstance(tv, Proxy))
def test_tv_methods_still_perform_their_function(self):
tv = Proxy(Television())
tv.channel = 10
tv.power()
self.assertEqual(10, tv.channel)
self.assertTrue(tv.is_on())
def test_proxy_records_messages_sent_to_tv(self):
tv = Proxy(Television())
tv.power()
tv.channel = 10
self.assertEqual(['power', 'channel'], tv.messages())
def test_proxy_handles_invalid_messages(self):
tv = Proxy(Television())
ex = None
with self.assertRaises(AttributeError):
tv.no_such_method()
def test_proxy_reports_methods_have_been_called(self):
tv = Proxy(Television())
tv.power()
tv.power()
self.assertTrue(tv.was_called('power'))
self.assertFalse(tv.was_called('channel'))
def test_proxy_counts_method_calls(self):
tv = Proxy(Television())
tv.power()
tv.channel = 48
tv.power()
self.assertEqual(2, tv.number_of_times_called('power'))
self.assertEqual(1, tv.number_of_times_called('channel'))
self.assertEqual(0, tv.number_of_times_called('is_on'))
def test_proxy_can_record_more_than_just_tv_objects(self):
proxy = Proxy("Py Ohio 2010")
result = proxy.upper()
self.assertEqual("PY OHIO 2010", result)
result = proxy.split()
self.assertEqual(["Py", "Ohio", "2010"], result)
self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class used in the proxy testing above.
class Television:
def __init__(self):
self._channel = None
self._power = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
def power(self):
if self._power == 'on':
self._power = 'off'
else:
self._power = 'on'
def is_on(self):
return self._power == 'on'
# Tests for the Television class. All of these tests should pass.
class TelevisionTest(Koan):
def test_it_turns_on(self):
tv = Television()
tv.power()
self.assertTrue(tv.is_on())
def test_it_also_turns_off(self):
tv = Television()
tv.power()
tv.power()
self.assertFalse(tv.is_on())
def test_edge_case_on_off(self):
tv = Television()
tv.power()
tv.power()
tv.power()
self.assertTrue(tv.is_on())
tv.power()
self.assertFalse(tv.is_on())
def test_can_set_the_channel(self):
tv = Television()
tv.channel = 11
self.assertEqual(11, tv.channel)
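# Tiny standalone illustration of the finished Proxy (not part of the koan); note the module
# imports the koans runner at the top, so running this directly assumes that package is available.
if __name__ == '__main__':
    demo = Proxy("Py Ohio 2010")
    assert demo.upper() == "PY OHIO 2010"
    assert demo.messages() == ['upper']   # 'messages' itself is not recorded
    print("Proxy demo ok:", demo.messages())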
| 26.698864
| 73
| 0.633113
|
263e9ab143a530098b1aac043b0236cc3cbfb712
| 721
|
py
|
Python
|
code/Level 2 - Intro to Programming/1-variables/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | 3
|
2021-04-27T14:01:49.000Z
|
2021-09-30T21:17:27.000Z
|
code/Level 2 - Intro to Programming/1-variables/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | 18
|
2019-01-13T13:14:30.000Z
|
2020-07-07T23:47:17.000Z
|
code/Level 2 - Intro to Programming/1-variables/main.py
|
tscofield/cpx-training
|
682a2cef6bb164bc7c374744de94c21581258392
|
[
"MIT"
] | 3
|
2019-02-07T03:43:30.000Z
|
2020-05-15T17:12:11.000Z
|
# Learn more https://realpython.com/python-variables/
# and https://realpython.com/python-data-types/
import time
import board
import neopixel
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=.2)
# Colors
BLACK = (0, 0, 0)
RED = (255, 0, 0)
PINK = (255, 100, 120)
ORANGE = (255, 100, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
PURPLE = (255, 0, 255)
BLUE = (0, 0, 255)
LIGHT_BLUE = (80, 200, 175)
WHITE = (255, 255, 255)
pixels = pixels
pixels.fill(BLACK)
pixels.show()
while True:
for i in range(len(pixels)):
pixels[i] = RED
time.sleep(.05)
time.sleep(1)
for i in range(len(pixels)):
pixels[i] = GREEN
time.sleep(.05)
time.sleep(1)
| 19.486486
| 61
| 0.619972
|
21af4b7003e161bdf10fc1b8ea094e97cb82461f
| 1,990
|
py
|
Python
|
docs/source/conf.py
|
iot-spectator/iot-health
|
ff5cf5b3613d47fb990751259fab68ad8940b1c4
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
iot-spectator/iot-health
|
ff5cf5b3613d47fb990751259fab68ad8940b1c4
|
[
"MIT"
] | 22
|
2020-10-05T00:31:31.000Z
|
2021-05-15T06:37:37.000Z
|
docs/source/conf.py
|
iot-spectator/iot-health
|
ff5cf5b3613d47fb990751259fab68ad8940b1c4
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""Sphinx document configurations."""
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "IoT Health"
copyright = "2021, IoT Spectator"
author = "IoT Spectator"
# The full version, including alpha/beta/rc tags
release = "0.0.3"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon"]
napoleon_google_docstring = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
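# Illustrative note (not part of the original conf.py): with this layout the HTML docs would
# typically be built with something like
#     sphinx-build -b html docs/source docs/build/html
# where the output directory is an assumption.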
| 32.622951
| 79
| 0.669347
|
8495aa07c1bfcd3123cc59a8ee86e9fa60122fde
| 55
|
py
|
Python
|
backEnd/model/comment.py
|
PommesPeter/PhotograhicSharing
|
89e46ec2de116c662e853a755133bb11501cd4ff
|
[
"MIT"
] | 1
|
2021-08-22T08:48:42.000Z
|
2021-08-22T08:48:42.000Z
|
backEnd/model/comment.py
|
PommesPeter/PhotograhicSharing
|
89e46ec2de116c662e853a755133bb11501cd4ff
|
[
"MIT"
] | 5
|
2021-08-21T03:26:01.000Z
|
2021-09-12T04:01:03.000Z
|
backEnd/model/comment.py
|
PommesPeter/PhotograhicSharing
|
89e46ec2de116c662e853a755133bb11501cd4ff
|
[
"MIT"
] | 3
|
2021-08-22T02:55:59.000Z
|
2021-08-23T10:41:24.000Z
|
class Comment:
def __init__(self):
pass
| 13.75
| 24
| 0.563636
|
053d2ff88878fcc274fe5f3c3093f37485bcc7f6
| 24,754
|
py
|
Python
|
library/aix_nim_vios_hc.py
|
wtcross/ansible-playbooks
|
5e1af5cce65109045070c655747837c3edc6e7e4
|
[
"Apache-2.0"
] | null | null | null |
library/aix_nim_vios_hc.py
|
wtcross/ansible-playbooks
|
5e1af5cce65109045070c655747837c3edc6e7e4
|
[
"Apache-2.0"
] | null | null | null |
library/aix_nim_vios_hc.py
|
wtcross/ansible-playbooks
|
5e1af5cce65109045070c655747837c3edc6e7e4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2017, International Business Machines Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""AIX VIOS Health Check: check the pair of VIOS can be updated"""
import os
import re
import subprocess
import logging
# Ansible module 'boilerplate'
# pylint: disable=wildcard-import,unused-wildcard-import,redefined-builtin
from ansible.module_utils.basic import *
DOCUMENTATION = """
---
module: aix_nim_vios_hc
author: "Patrice Jacquin"
version_added: "1.0.0"
requirements: [ AIX ]
"""
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def exec_cmd(cmd, module, exit_on_error=False, debug_data=True):
"""
Execute the given command
- cmd array of the command parameters
- module the module variable
    - exit_on_error exception is raised if true and cmd returns != 0
- debug_data prints some trace in DEBUG_DATA if set
    In case of error, set an error message and fail the module
return
- ret_code (return code of the command)
- output output of the command
"""
global DEBUG_DATA
ret_code = 0
output = ''
logging.debug('exec command:{}'.format(cmd))
if debug_data is True:
DEBUG_DATA.append('exec command:{}'.format(cmd))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
# exception for ret_code != 0 can be cached if exit_on_error is set
output = exc.output
ret_code = exc.returncode
if exit_on_error is True:
msg = 'Command: {} Exception.Args{} =>RetCode:{} ... Error:{}'\
.format(cmd, exc.cmd, ret_code, output)
module.fail_json(msg=msg)
except Exception as exc:
        # uncaught exception
msg = 'Command: {} Exception.Args{}'.format(cmd, exc.args)
module.fail_json(msg=msg)
if ret_code == 0:
if debug_data is True:
DEBUG_DATA.append('exec output:{}'.format(output))
logging.debug('exec command output:{}'.format(output))
else:
if debug_data is True:
DEBUG_DATA.append('exec command ret_code:{}, stderr:{}'.format(ret_code, output))
logging.debug('exec command ret_code:{}, stderr:{}'.format(ret_code, output))
return (ret_code, output)
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def get_hmc_info(module):
"""
Get the hmc info on the nim master
fill the hmc_dic passed in parameter
return a dic with hmc info
"""
std_out = ''
info_hash = {}
cmd = ['lsnim', '-t', 'hmc', '-l']
(ret, std_out) = exec_cmd(cmd, module)
obj_key = ''
for line in std_out.split('\n'):
line = line.rstrip()
match_key = re.match(r"^(\S+):", line)
# HMC name
if match_key:
obj_key = match_key.group(1)
info_hash[obj_key] = {}
continue
match_cstate = re.match(r"^\s+Cstate\s+=\s+(.*)$", line)
if match_cstate:
cstate = match_cstate.group(1)
info_hash[obj_key]['cstate'] = cstate
continue
match_key = re.match(r"^\s+passwd_file\s+=\s+(.*)$", line)
if match_key:
info_hash[obj_key]['passwd_file'] = match_key.group(1)
continue
match_key = re.match(r"^\s+login\s+=\s+(.*)$", line)
if match_key:
info_hash[obj_key]['login'] = match_key.group(1)
continue
match_key = re.match(r"^\s+if1\s*=\s*\S+\s*(\S*)\s*.*$", line)
if match_key:
info_hash[obj_key]['ip'] = match_key.group(1)
continue
return info_hash
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def get_nim_cecs_info(module):
"""
    Get the list of the cec defined on the nim master and get their serial number.
return the list of the name of the cec objects defined on the
nim master and their associated CEC serial number value
"""
std_out = ''
info_hash = {}
cmd = ['lsnim', '-t', 'cec', '-l']
(ret, std_out) = exec_cmd(cmd, module)
# lpar name and associated Cstate
obj_key = ""
for line in std_out.split('\n'):
line = line.rstrip()
match_key = re.match(r"^(\S+):", line)
if match_key:
obj_key = match_key.group(1)
info_hash[obj_key] = {}
continue
match_serial = re.match(r"^\s+serial\s+=\s+(.*)$", line)
if match_serial:
info_hash[obj_key]['serial'] = match_serial.group(1)
continue
return info_hash
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def get_nim_clients_info(module, lpar_type):
"""
Get the list of the lpar (standalones or vios) defined on the nim master, and get their
cstate.
return the list of the name of the lpar objects defined on the
nim master and their associated cstate value
"""
std_out = ''
info_hash = {}
cmd = ['lsnim', '-t', lpar_type, '-l']
(ret, std_out) = exec_cmd(cmd, module)
# lpar name and associated Cstate
obj_key = ""
for line in std_out.split('\n'):
line = line.rstrip()
match_key = re.match(r"^(\S+):", line)
if match_key:
obj_key = match_key.group(1)
info_hash[obj_key] = {}
continue
match_cstate = re.match(r"^\s+Cstate\s+=\s+(.*)$", line)
if match_cstate:
info_hash[obj_key]['cstate'] = match_cstate.group(1)
continue
# For VIOS store the management profile
if lpar_type == 'vios':
match_mgmtprof = re.match(r"^\s+mgmt_profile1\s+=\s+(.*)$", line)
if match_mgmtprof:
mgmt_elts = match_mgmtprof.group(1).split()
if len(mgmt_elts) == 3:
info_hash[obj_key]['mgmt_hmc_id'] = mgmt_elts[0]
info_hash[obj_key]['mgmt_vios_id'] = mgmt_elts[1]
info_hash[obj_key]['mgmt_cec'] = mgmt_elts[2]
match_if = re.match(r"^\s+if1\s+=\s+\S+\s+(\S+)\s+.*$", line)
if match_if:
info_hash[obj_key]['vios_ip'] = match_if.group(1)
return info_hash
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def build_nim_node(module):
"""
    build the nim node containing the nim vios and hmc info.
arguments:
None
return:
None
"""
global NIM_NODE
# =========================================================================
# Build hmc info list
# =========================================================================
nim_hmc = {}
nim_hmc = get_hmc_info(module)
NIM_NODE['nim_hmc'] = nim_hmc
logging.debug('NIM HMC: {}'.format(nim_hmc))
# =========================================================================
# Build CEC list
# =========================================================================
nim_cec = {}
nim_cec = get_nim_cecs_info(module)
# =========================================================================
# Build vios info list
# =========================================================================
nim_vios = {}
nim_vios = get_nim_clients_info(module, 'vios')
# =========================================================================
# Complete the CEC serial in nim_vios dict
# =========================================================================
for key in nim_vios:
if nim_vios[key]['mgmt_cec'] in nim_cec:
nim_vios[key]['mgmt_cec_serial'] = nim_cec[nim_vios[key]['mgmt_cec']]['serial']
NIM_NODE['nim_vios'] = nim_vios
logging.debug('NIM VIOS: {}'.format(nim_vios))
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def check_vios_targets(targets):
"""
check the list of the vios targets.
a target name could be of the following form:
(vios1, vios2) (vios3)
arguments:
targets (str): list of tuple of NIM name of vios machine
return: the list of the existing vios tuple matching the target list
"""
global NIM_NODE
vios_list = {}
vios_list_tuples_res = []
vios_list_tuples = targets.replace(" ", "").replace("),(", ")(").split('(')
# ===========================================
# Build targets list
# ===========================================
for vios_tuple in vios_list_tuples[1:]:
logging.debug('vios_tuple: {}'.format(vios_tuple))
tuple_elts = list(vios_tuple[:-1].split(','))
tuple_len = len(tuple_elts)
if tuple_len != 1 and tuple_len != 2:
            logging.error('Malformed VIOS targets {}. Tuple {} should have 1 or 2 elements.'
.format(targets, tuple_elts))
return None
# check vios not already exists in the target list
if tuple_elts[0] in vios_list or \
(tuple_len == 2 and (tuple_elts[1] in vios_list or
tuple_elts[0] == tuple_elts[1])):
logging.error('Malformed VIOS targets {}. Duplicated VIOS'
.format(targets))
return None
# check vios is known by the NIM master - if not ignore it
if tuple_elts[0] not in NIM_NODE['nim_vios'] or \
(tuple_len == 2 and tuple_elts[1] not in NIM_NODE['nim_vios']):
logging.debug('skipping {} as VIOS not known by the NIM master.'
.format(vios_tuple))
continue
if tuple_len == 2:
vios_list[tuple_elts[0]] = tuple_elts[1]
vios_list[tuple_elts[1]] = tuple_elts[0]
# vios_list = vios_list.extend([tuple_elts[0], tuple_elts[1]])
my_tuple = (tuple_elts[0], tuple_elts[1])
vios_list_tuples_res.append(tuple(my_tuple))
else:
vios_list[tuple_elts[0]] = tuple_elts[0]
# vios_list.append(tuple_elts[0])
my_tuple = (tuple_elts[0],)
vios_list_tuples_res.append(tuple(my_tuple))
return vios_list_tuples_res
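# Worked example (illustrative, not part of the module): assuming NIM_NODE['nim_vios'] knows
# the clients 'vios1', 'vios2' and 'vios3', the call
#     check_vios_targets("(vios1,vios2) (vios3)")
# returns [('vios1', 'vios2'), ('vios3',)]; tuples naming an unknown VIOS are skipped, and a
# duplicated VIOS in the targets string makes the function return None.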
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def vios_health(module, mgmt_sys_uuid, hmc_ip, vios_uuids):
"""
Check the "health" of the given VIOSES
return: True if ok,
False else
"""
global NIM_NODE
logging.debug('hmc_ip: {} vios_uuids: {}'.format(hmc_ip, vios_uuids))
# build the vioshc cmde
cmd = ['/usr/sbin/vioshc.py', '-i', hmc_ip, '-m', mgmt_sys_uuid]
for vios in vios_uuids:
cmd.extend(['-U', vios])
if VERBOSITY != 0:
vstr = "-v"
verbose = 1
while verbose < VERBOSITY:
vstr += "v"
verbose += 1
cmd.extend([vstr])
if VERBOSITY >= 3:
cmd.extend(['-D'])
(ret, std_out) = exec_cmd(cmd, module)
if ret != 0:
OUTPUT.append(' VIOS Health check failed, vioshc returns: {} {}'
.format(ret, std_out))
logging.error('VIOS Health check failed, vioshc returns: {} {}'
.format(ret, std_out))
OUTPUT.append(' VIOS can NOT be updated')
logging.info('vioses {} can NOT be updated'.format(vios_uuids))
ret = 1
elif re.search(r'Pass rate of 100%', std_out, re.M):
OUTPUT.append(' VIOS Health check passed')
logging.info('vioses {} can be updated'.format(vios_uuids))
ret = 0
else:
OUTPUT.append(' VIOS can NOT be updated')
logging.info('vioses {} can NOT be updated'.format(vios_uuids))
ret = 1
return ret
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def vios_health_init(module, hmc_id, hmc_ip):
"""
Check the "health" of the given VIOSES for a rolling update point of view
This operation uses the vioshc.py script to evaluate the capacity of the
pair of the VIOSes to support the rolling update operation:
- check they manage the same LPARs,
- ...
return: True if ok,
False else
"""
global NIM_NODE
logging.debug('hmc_id: {}, hmc_ip: {}'.format(hmc_id, hmc_ip))
ret = 0
# if needed, call the /usr/sbin/vioshc.py script a first time to
# collect UUIDs
cmd = ['/usr/sbin/vioshc.py', '-i', hmc_ip, '-l', 'a']
if VERBOSITY != 0:
vstr = "-v"
verbose = 1
while verbose < VERBOSITY:
vstr += "v"
verbose += 1
cmd.extend([vstr])
if VERBOSITY >= 3:
cmd.extend(['-D'])
(ret, std_out) = exec_cmd(cmd, module)
if ret != 0:
OUTPUT.append(' Failed to get the VIOS information, vioshc returns: {} {}'
.format(ret, std_out))
logging.error('Failed to get the VIOS information, vioshc returns: {} {}'
.format(ret, std_out))
msg = 'Health init check failed. vioshc command error. rc:{}, error: {}'\
.format(ret, std_out)
module.fail_json(msg=msg)
# Parse the output and store the UUIDs
data_start = 0
vios_section = 0
cec_uuid = ''
cec_serial = ''
for line in std_out.split('\n'):
line = line.rstrip()
# TBC - remove?
logging.debug('--------line {}'.format(line))
if vios_section == 0:
# skip the header
match_key = re.match(r"^-+\s+-+$", line)
if match_key:
data_start = 1
continue
if data_start == 0:
continue
# New managed system section
match_key = re.match(r"^(\S+)\s+(\S+)$", line)
if match_key:
cec_uuid = match_key.group(1)
cec_serial = match_key.group(2)
logging.debug('New managed system section:{},{}'
.format(cec_uuid, cec_serial))
continue
# New vios section
match_key = re.match(r"^\s+-+\s+-+$", line)
if match_key:
vios_section = 1
continue
# skip all header and empty lines until the vios section
continue
# new vios partition
match_key = re.match(r"^\s+(\S+)\s+(\S+)$", line)
if match_key:
vios_uuid = match_key.group(1)
vios_part_id = match_key.group(2)
            logging.debug('new vios partition section:{},{}'
.format(vios_uuid, vios_part_id))
# retrieve the vios with the vios_part_id and the cec_serial value
# and store the UUIDs in the dictionaries
for vios_key in NIM_NODE['nim_vios']:
if NIM_NODE['nim_vios'][vios_key]['mgmt_vios_id'] == vios_part_id \
and NIM_NODE['nim_vios'][vios_key]['mgmt_cec_serial'] == cec_serial:
NIM_NODE['nim_vios'][vios_key]['vios_uuid'] = vios_uuid
NIM_NODE['nim_vios'][vios_key]['cec_uuid'] = cec_uuid
break
continue
# skip vios line where lparid is not found.
match_key = re.match(r"^\s+(\S+)\s+none$", line)
if match_key:
continue
# skip empty line after vios section. stop the vios section
match_key = re.match(r"^$", line)
if match_key:
vios_section = 0
continue
OUTPUT.append(' Bad command output for the hmc: {}'.format(hmc_id))
logging.error('vioshc command, bad output line: {}'.format(line))
msg = 'Health init check failed. Bad vioshc.py command output for the {} hmc - output: {}'\
.format(hmc_id, line)
module.fail_json(msg=msg)
logging.debug('vioshc output: {}'.format(line))
return ret
# ----------------------------------------------------------------
# ----------------------------------------------------------------
def health_check(module, targets):
"""
    Health assessment of the VIOS targets to ensure they can support
a rolling update operation.
For each VIOS tuple,
- call /usr/sbin/vioshc.py a first time to collect the VIOS UUIDs
- call it a second time to check the healthiness
return: True if ok,
False else
"""
global NIM_NODE
logging.debug('targets: {}'.format(targets))
health_tab = {}
vios_key = []
for target_tuple in targets:
OUTPUT.append('Checking: {}'.format(target_tuple))
logging.debug('target_tuple: {}'.format(target_tuple))
tup_len = len(target_tuple)
vios1 = target_tuple[0]
if tup_len == 2:
vios2 = target_tuple[1]
vios_key = "{}-{}".format(vios1, vios2)
else:
vios_key = vios1
logging.debug('vios1: {}'.format(vios1))
# cec_serial = NIM_NODE['nim_vios'][vios1]['mgmt_cec_serial']
hmc_id = NIM_NODE['nim_vios'][vios1]['mgmt_hmc_id']
if hmc_id not in NIM_NODE['nim_hmc']:
            OUTPUT.append(' VIOS {} refers to a nonexistent hmc {}'
.format(vios1, hmc_id))
logging.warn("VIOS {} refers to an inexistant hmc {}"
.format(vios1, hmc_id))
health_tab[vios_key] = 'FAILURE-HC'
continue
hmc_ip = NIM_NODE['nim_hmc'][hmc_id]['ip']
vios_uuid = []
# if needed call vios_health_init to get the UUIDs value
if 'vios_uuid' not in NIM_NODE['nim_vios'][vios1] \
or tup_len == 2 and 'vios_uuid' not in NIM_NODE['nim_vios'][vios2]:
OUTPUT.append(' Getting VIOS UUID')
ret = vios_health_init(module, hmc_id, hmc_ip)
if ret != 0:
OUTPUT.append(' Unable to get UUIDs of {} and {}, ret: {}'
.format(vios1, vios2, ret))
logging.warn("Unable to get UUIDs of {} and {}, ret: {}"
.format(vios1, vios2, ret))
health_tab[vios_key] = 'FAILURE-HC'
continue
if 'vios_uuid' not in NIM_NODE['nim_vios'][vios1] \
or tup_len == 2 and 'vios_uuid' not in NIM_NODE['nim_vios'][vios2]:
# vios uuid's not found
OUTPUT.append(' One VIOS UUID not found')
logging.warn("Unable to find one vios_uuid in NIM_NODE")
health_tab[vios_key] = 'FAILURE-HC'
else:
# run the vios_health check for the vios tuple
vios_uuid.append(NIM_NODE['nim_vios'][vios1]['vios_uuid'])
if tup_len == 2:
vios_uuid.append(NIM_NODE['nim_vios'][vios2]['vios_uuid'])
mgmt_uuid = NIM_NODE['nim_vios'][vios1]['cec_uuid']
OUTPUT.append(' Checking if we can update the VIOS')
ret = vios_health(module, mgmt_uuid, hmc_ip, vios_uuid)
if ret == 0:
OUTPUT.append(' Health check succeeded')
logging.info("Health check succeeded for {}".format(vios_key))
health_tab[vios_key] = 'SUCCESS-HC'
else:
OUTPUT.append(' Health check failed')
logging.info("Health check failed for {}".format(vios_key))
health_tab[vios_key] = 'FAILURE-HC'
logging.debug('health_tab: {}'. format(health_tab))
return health_tab
################################################################################
if __name__ == '__main__':
DEBUG_DATA = []
OUTPUT = []
PARAMS = {}
NIM_NODE = {}
CHANGED = False
targets_list = []
VARS = {}
VERBOSITY = 0
module = AnsibleModule(
argument_spec=dict(
description=dict(required=False, type='str'),
targets=dict(required=True, type='str'),
action=dict(required=True, choices=['health_check'], type='str'),
vars=dict(required=False, type='dict'),
),
supports_check_mode=True
)
# =========================================================================
# Get Module params
# =========================================================================
action = module.params['action']
targets = module.params['targets']
VERBOSITY = module._verbosity
if module.params['description']:
description = module.params['description']
else:
description = "Perform a VIOS Health Check operation: {} request".format(action)
PARAMS['action'] = action
PARAMS['targets'] = targets
PARAMS['Description'] = description
# Handle playbook variables
if module.params['vars']:
VARS = module.params['vars']
if VARS is not None and 'log_file' not in VARS:
VARS['log_file'] = '/tmp/ansible_vios_check_debug.log'
# Open log file
logging.basicConfig(
filename="{}".format(VARS['log_file']),
format='[%(asctime)s] %(levelname)s: [%(funcName)s:%(thread)d] %(message)s',
level=logging.DEBUG)
logging.debug('*** START VIOS {} ***'.format(action.upper()))
OUTPUT.append('VIOS Health Check operation for {}'.format(targets))
logging.info('action {} for {} targets'.format(action, targets))
logging.info('VERBOSITY is set to {}'.format(VERBOSITY))
targets_health_status = {}
# =========================================================================
# build nim node info
# =========================================================================
build_nim_node(module)
ret = check_vios_targets(targets)
if (ret is None) or (not ret):
OUTPUT.append(' Warning: Empty target list')
logging.warn('Empty target list: "{}"'.format(targets))
else:
targets_list = ret
OUTPUT.append(' Targets list: {}'.format(targets_list))
logging.debug('Targets list: {}'.format(targets_list))
# ===============================================
# Check vioshc script is present, else install it
# ===============================================
logging.debug('Check vioshc script: /usr/sbin/vioshc.py')
vioshcpath = os.path.abspath(os.path.join(os.sep, 'usr', 'sbin'))
vioshcfile = os.path.join(vioshcpath, 'vioshc.py')
if not os.path.exists(vioshcfile):
OUTPUT.append('Cannot find {}'.format(vioshcfile))
logging.error('Cannot find {}'.format(vioshcfile))
module.fail_json(msg="Cannot find {}".format(vioshcfile))
st = os.stat(vioshcfile)
if not st.st_mode & stat.S_IEXEC:
OUTPUT.append('Bad credentials for {}'.format(vioshcfile))
logging.error('Bad credentials for {}'.format(vioshcfile))
module.fail_json(msg="Bad credentials for {}".format(vioshcfile))
targets_health_status = health_check(module, targets_list)
OUTPUT.append('VIOS Health Check status:')
logging.info('VIOS Health Check status:')
for vios_key in targets_health_status.keys():
OUTPUT.append(" {} : {}".format(vios_key, targets_health_status[vios_key]))
logging.info(' {} : {}'.format(vios_key, targets_health_status[vios_key]))
# ==========================================================================
# Exit
# ==========================================================================
module.exit_json(
changed=CHANGED,
msg="VIOS Health Check completed successfully",
targets=targets_list,
nim_node=NIM_NODE,
status=targets_health_status,
debug_output=DEBUG_DATA,
output=OUTPUT)
| 35.062323
| 99
| 0.518098
|
713a97510e68f21c5b9776a6a22c542bd62fe5c4
| 354
|
py
|
Python
|
fuzzysets.py
|
vbsinha/fuzzy-image-enhancement
|
6bf983ddbc558b71b759dff9b1d86a9c133b1e14
|
[
"MIT"
] | 11
|
2019-06-01T10:13:57.000Z
|
2021-05-26T08:00:01.000Z
|
fuzzysets.py
|
vbsinha/fuzzy-image-enhancement
|
6bf983ddbc558b71b759dff9b1d86a9c133b1e14
|
[
"MIT"
] | 2
|
2021-01-30T23:49:20.000Z
|
2021-11-01T11:54:17.000Z
|
fuzzysets.py
|
vbsinha/fuzzy-image-enhancement
|
6bf983ddbc558b71b759dff9b1d86a9c133b1e14
|
[
"MIT"
] | 4
|
2019-08-14T00:42:24.000Z
|
2021-09-07T19:40:39.000Z
|
import scipy.stats
def medium(x):
assert x >= 0 and x <= 1
return 0.25 * scipy.stats.norm(0.4, 0.1).pdf(x)
def small(x):
assert x >= 0 and x <= 1
return 0.5 * scipy.stats.norm(0, 0.2).pdf(x)
def large(x):
assert x >= 0 and x <= 1
if x < 0.8:
return 0.5 * scipy.stats.norm(0.8, 0.2).pdf(x)
else:
return 1
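# Quick illustrative check of the three membership functions (not part of the original file):
if __name__ == "__main__":
    for x in (0.1, 0.4, 0.9):
        print(x, small(x), medium(x), large(x))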
| 17.7
| 54
| 0.533898
|
9ebaf5b154ae08340c286ef30a5fcf0a8d992ce1
| 4,314
|
py
|
Python
|
build/env/lib/python2.7/site-packages/windmill-1.6-py2.7.egg/windmill/management/commands/test_windmill.py
|
bopopescu/myhue
|
5f566970a5a1fa5af9f01832c9e9808c47634bc7
|
[
"Apache-2.0"
] | 61
|
2015-03-16T18:36:06.000Z
|
2021-12-02T10:08:17.000Z
|
windmill/management/commands/test_windmill.py
|
admc/windmill
|
4304ee7258eb0c2814f215d8ce90abf02b1f737f
|
[
"Apache-2.0"
] | 8
|
2015-03-10T10:01:26.000Z
|
2020-05-18T10:51:24.000Z
|
windmill/management/commands/test_windmill.py
|
admc/windmill
|
4304ee7258eb0c2814f215d8ce90abf02b1f737f
|
[
"Apache-2.0"
] | 14
|
2015-01-29T16:28:33.000Z
|
2021-09-04T11:19:48.000Z
|
# Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand
from windmill.authoring import djangotest
import sys, os
from time import sleep
import types
import logging
class ServerContainer(object):
start_test_server = djangotest.start_test_server
stop_test_server = djangotest.stop_test_server
def attempt_import(name, suffix):
try:
mod = __import__(name+'.'+suffix)
except ImportError:
mod = None
if mod is not None:
s = name.split('.')
mod = __import__(s.pop(0))
for x in s+[suffix]:
mod = getattr(mod, x)
return mod
class Command(BaseCommand):
help = "Run windmill tests. Specify a browser, if one is not passed Firefox will be used"
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
from windmill.conf import global_settings
from windmill.authoring.djangotest import WindmillDjangoUnitTest
if 'ie' in labels:
global_settings.START_IE = True
sys.argv.remove('ie')
elif 'safari' in labels:
global_settings.START_SAFARI = True
sys.argv.remove('safari')
elif 'chrome' in labels:
global_settings.START_CHROME = True
sys.argv.remove('chrome')
else:
global_settings.START_FIREFOX = True
if 'firefox' in labels:
sys.argv.remove('firefox')
if 'manage.py' in sys.argv:
sys.argv.remove('manage.py')
if 'test_windmill' in sys.argv:
sys.argv.remove('test_windmill')
server_container = ServerContainer()
server_container.start_test_server()
global_settings.TEST_URL = 'http://127.0.0.1:%d' % server_container.server_thread.port
# import windmill
# windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
from windmill.authoring import setup_module, teardown_module
from django.conf import settings
tests = []
for name in settings.INSTALLED_APPS:
for suffix in ['tests', 'wmtests', 'windmilltests']:
x = attempt_import(name, suffix)
if x is not None: tests.append((suffix,x,));
wmtests = []
for (ttype, mod,) in tests:
if ttype == 'tests':
for ucls in [getattr(mod, x) for x in dir(mod)
if ( type(getattr(mod, x, None)) in (types.ClassType,
types.TypeType) ) and
issubclass(getattr(mod, x), WindmillDjangoUnitTest)
]:
wmtests.append(ucls.test_dir)
else:
if mod.__file__.endswith('__init__.py') or mod.__file__.endswith('__init__.pyc'):
wmtests.append(os.path.join(*os.path.split(os.path.abspath(mod.__file__))[:-1]))
else:
wmtests.append(os.path.abspath(mod.__file__))
if len(wmtests) is 0:
print 'Sorry, no windmill tests found.'
else:
testtotals = {}
x = logging.getLogger()
x.setLevel(0)
from windmill.dep import functest
bin = functest.bin
runner = functest.runner
runner.CLIRunner.final = classmethod(lambda self, totals: testtotals.update(totals) )
setup_module(tests[0][1])
sys.argv = sys.argv + wmtests
bin.cli()
teardown_module(tests[0][1])
            if testtotals['fail'] != 0:
sleep(.5)
sys.exit(1)
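        # Illustrative invocation (not part of the original command), assuming windmill is
        # installed in a Django project:
        #     python manage.py test_windmill firefox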
| 37.189655
| 100
| 0.587854
|
6074f579981b273fbdd87c216adea1fc38280a4c
| 1,767
|
py
|
Python
|
research/talks/ibm/05-2017/multicos.py
|
cgranade/cgranade.github.io
|
72f263c2cc03a14a5fa853cb7b7b53a5b57626c7
|
[
"MIT"
] | 3
|
2015-10-28T12:36:06.000Z
|
2018-12-25T17:07:58.000Z
|
research/talks/uts/03-2017/multicos.py
|
cgranade/cgranade.github.io
|
72f263c2cc03a14a5fa853cb7b7b53a5b57626c7
|
[
"MIT"
] | 1
|
2017-05-08T10:00:27.000Z
|
2017-05-08T10:00:27.000Z
|
research/talks/macquarie/08-2016/multicos.py
|
cgranade/cgranade.github.io
|
72f263c2cc03a14a5fa853cb7b7b53a5b57626c7
|
[
"MIT"
] | 3
|
2016-05-11T00:23:22.000Z
|
2017-05-08T04:59:18.000Z
|
from qinfer.abstract_model import Model
import numpy as np
class MultiCosModel(Model):
@property
def n_modelparams(self):
return 2
@property
def is_n_outcomes_constant(self):
return True
def n_outcomes(self, expparams):
return 2
def are_models_valid(self, modelparams):
return np.all(np.logical_and(modelparams > 0, modelparams <= 1), axis=1)
@property
def expparams_dtype(self):
return [('ts', '2float')]
def likelihood(self, outcomes, modelparams, expparams):
# We first call the superclass method, which basically
# just makes sure that call count diagnostics are properly
# logged.
super(MultiCosModel, self).likelihood(outcomes, modelparams, expparams)
# Next, since we have a two-outcome model, everything is defined by
# Pr(0 | modelparams; expparams), so we find the probability of 0
# for each model and each experiment.
#
# We do so by taking a product along the modelparam index (len 2,
# indicating omega_1 or omega_2), then squaring the result.
pr0 = np.prod(
np.cos(
# shape (n_models, 1, 2)
modelparams[:, np.newaxis, :] *
# shape (n_experiments, 2)
expparams['ts']
), # <- broadcasts to shape (n_models, n_experiments, 2).
axis=2 # <- product over the final index (len 2)
) ** 2 # square each element
# Now we use pr0_to_likelihood_array to turn this two index array
# above into the form expected by SMCUpdater and other consumers
# of likelihood().
return Model.pr0_to_likelihood_array(outcomes, pr0)
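# Minimal usage sketch (not from the original talk materials), assuming NumPy plus the QInfer
# version these materials targeted. The model implements Pr(0 | omega; t) = prod_i cos^2(omega_i * t_i).
if __name__ == "__main__":
    model = MultiCosModel()
    modelparams = np.array([[0.5, 0.25]])                      # one model: (omega_1, omega_2)
    expparams = np.empty((1,), dtype=model.expparams_dtype)    # one experiment
    expparams['ts'] = [[9.0, 12.0]]                            # (t_1, t_2)
    outcomes = np.array([0, 1])
    print(model.likelihood(outcomes, modelparams, expparams))  # shape (2, 1, 1)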
| 35.34
| 80
| 0.611205
|
e20b3f2f0498f6b65f6ff0000549ef3bb15dbcd8
| 1,718
|
py
|
Python
|
setup.py
|
kakwa/ldapcherry-ppolicy-cracklib
|
4af7b9fa0ee4dc3ba68ef7119ea1afdbb2b27934
|
[
"MIT"
] | 1
|
2015-11-05T00:20:33.000Z
|
2015-11-05T00:20:33.000Z
|
setup.py
|
kakwa/ldapcherry-ppolicy-cracklib
|
4af7b9fa0ee4dc3ba68ef7119ea1afdbb2b27934
|
[
"MIT"
] | null | null | null |
setup.py
|
kakwa/ldapcherry-ppolicy-cracklib
|
4af7b9fa0ee4dc3ba68ef7119ea1afdbb2b27934
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from distutils.core import setup
install_requires = ['ldapcherry', 'cracklib']
try:
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
description = f.read()
f.close()
except IOError:
description = 'lcppolicy_cracklib'
try:
license = open('LICENSE').read()
except IOError:
license = 'MIT'
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
            # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
except ImportError:
from distutils.core import setup
PyTest = lambda x: x
setup(
name='lcppolicy_cracklib',
version='0.1.0',
author='Pierre-Francois Carpentier',
author_email='carpentier.pf@gmail.com',
packages=['lcppolicy_cracklib'],
url='https://github.com/kakwa/ldapcherry-ppolicy-cracklib',
license=license,
description='Cracklib password policy plugin for LdapCherry.',
long_description=description,
install_requires=install_requires,
tests_require=['pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7']
)
| 26.84375
| 67
| 0.658324
|
489cb0c5d18fb27bd6dcb6405fd7912c09b09016
| 854
|
py
|
Python
|
tests/watson/html/test_elements.py
|
watsonpy/watson-html
|
2e7a7024ce2d9301a8a87c762d5a3c468e002e0a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/watson/html/test_elements.py
|
watsonpy/watson-html
|
2e7a7024ce2d9301a8a87c762d5a3c468e002e0a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/watson/html/test_elements.py
|
watsonpy/watson-html
|
2e7a7024ce2d9301a8a87c762d5a3c468e002e0a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from pytest import raises
from watson.html.elements import flatten_attributes, TagMixin
class TestFlattenAttributes(object):
def test_flatten(self):
attrs = {'class': 'menu', 'id': 'MainMenu'}
assert flatten_attributes(attrs) == 'class="menu" id="MainMenu"'
def test_flatten_forget_empty(self):
attrs = {'class': 'menu', 'id': None}
assert flatten_attributes(attrs) == 'class="menu"'
def test_flatten_keep_empty(self):
attrs = {'class': 'menu', 'id': None}
assert flatten_attributes(attrs, True) == 'class="menu" id=""'
class TestTagMixin(object):
def test_initialize(self):
mixin = TagMixin(id='Test')
assert 'id' in mixin.attributes
def test_render(self):
with raises(NotImplementedError):
str(TagMixin(id='Test'))
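# --- Hedged sketch, not part of the original tests ---
# TagMixin stores attributes and leaves rendering to subclasses (str() on the
# mixin itself raises NotImplementedError, as asserted above). A concrete
# element might look roughly like this, with flatten_attributes assumed to be
# the intended rendering helper:
#
#     class Br(TagMixin):
#         def __str__(self):
#             return '<br {0} />'.format(flatten_attributes(self.attributes))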
| 28.466667
| 72
| 0.639344
|
3a3cdcb16ee44e2c866fbee967792d0e8644c962
| 335
|
py
|
Python
|
challenges/2020/03-tobogganTrajectory/python/partTwo.py
|
codemicro/adventOfCode
|
53574532ece1d19e5f5ba2f39e8e183c4c6225a1
|
[
"MIT"
] | 9
|
2020-12-06T23:18:30.000Z
|
2021-12-19T22:31:26.000Z
|
challenges/2020/03-tobogganTrajectory/python/partTwo.py
|
codemicro/adventOfCode
|
53574532ece1d19e5f5ba2f39e8e183c4c6225a1
|
[
"MIT"
] | null | null | null |
challenges/2020/03-tobogganTrajectory/python/partTwo.py
|
codemicro/adventOfCode
|
53574532ece1d19e5f5ba2f39e8e183c4c6225a1
|
[
"MIT"
] | 3
|
2020-12-08T09:45:44.000Z
|
2020-12-15T19:20:20.000Z
|
from common import *
def partTwo(instr: str) -> int:
forest = parse(instr)
tree_product = 1
offset_pairs = [(3, 1), (1, 1), (5, 1), (7, 1), (1, 2)]
    for pair in offset_pairs:
encountered_trees = find_collisions(forest, *pair)
tree_product *= encountered_trees
return tree_product
| 20.9375
| 59
| 0.620896
|
8c3c62d9f2bf7976ab66d4a22c858a41c1309650
| 1,192
|
py
|
Python
|
rasa_nlu_examples/tokenizers/blankspacy.py
|
aresa7796/rasa-nlu-examples
|
44d11a732ebca8cf32a7fee3477b3e37ee720871
|
[
"Apache-2.0"
] | 129
|
2020-06-26T10:41:24.000Z
|
2022-03-31T17:00:29.000Z
|
rasa_nlu_examples/tokenizers/blankspacy.py
|
aresa7796/rasa-nlu-examples
|
44d11a732ebca8cf32a7fee3477b3e37ee720871
|
[
"Apache-2.0"
] | 109
|
2020-06-22T09:32:18.000Z
|
2022-03-31T06:00:03.000Z
|
rasa_nlu_examples/tokenizers/blankspacy.py
|
aresa7796/rasa-nlu-examples
|
44d11a732ebca8cf32a7fee3477b3e37ee720871
|
[
"Apache-2.0"
] | 59
|
2020-06-21T16:23:22.000Z
|
2022-03-31T17:00:31.000Z
|
from typing import Any, Dict, List, Text
import spacy
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.shared.nlu.training_data.message import Message
class BlankSpacyTokenizer(Tokenizer):
"""
A simple spaCy tokenizer without a language model attached.
    This tokenizer supports the spaCy languages listed here: https://spacy.io/usage/models#languages
Note that this tokenizer does not require a SpacyNLP component and that the
standard SpacyNLP component should be omitted.
"""
defaults = {
"lang": None,
}
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the a blank spaCy model."""
super().__init__(component_config)
self.nlp = spacy.blank(component_config["lang"])
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
text = message.get(attribute)
doc = self.nlp(text)
tokens = [
Token(
text=t.text,
start=t.idx,
)
for t in doc
if t.text and t.text.strip()
]
return self._apply_token_pattern(tokens)
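# --- Hedged usage sketch, not part of the original module ---
# Exercise the component directly; the Message constructor and the "text"
# attribute name assume the Rasa 2.x API that the imports above target, and
# spacy.blank("en") needs no model download.
if __name__ == "__main__":
    tokenizer = BlankSpacyTokenizer({"lang": "en"})
    message = Message(data={"text": "hello rasa nlu examples"})
    tokens = tokenizer.tokenize(message, attribute="text")
    print([t.text for t in tokens])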
| 30.564103
| 97
| 0.642617
|
450c3a6b170bc8fc92c1d092a04cc93cf190ccb8
| 941
|
py
|
Python
|
utils/onnx.py
|
jacobwjs/3DDFA_V2
|
1d97fa30ce4b4b9d9df2dfea48481d7598040dcc
|
[
"MIT"
] | null | null | null |
utils/onnx.py
|
jacobwjs/3DDFA_V2
|
1d97fa30ce4b4b9d9df2dfea48481d7598040dcc
|
[
"MIT"
] | null | null | null |
utils/onnx.py
|
jacobwjs/3DDFA_V2
|
1d97fa30ce4b4b9d9df2dfea48481d7598040dcc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
__author__ = 'cleardusk'
import sys
sys.path.append('..')
import torch
import models_3ddfa
from utils.tddfa_util import load_model
def convert_to_onnx(**kvs):
# 1. load model
size = kvs.get('size', 120)
model = getattr(models_3ddfa, kvs.get('arch'))(
num_classes=kvs.get('num_params', 62),
widen_factor=kvs.get('widen_factor', 1),
size=size,
mode=kvs.get('mode', 'small')
)
checkpoint_fp = kvs.get('checkpoint_fp')
model = load_model(model, checkpoint_fp)
model.eval()
# 2. convert
batch_size = 1
dummy_input = torch.randn(batch_size, 3, size, size)
wfp = checkpoint_fp.replace('.pth', '.onnx')
torch.onnx.export(
model,
(dummy_input, ),
wfp,
input_names=['input'],
output_names=['output'],
do_constant_folding=True
)
print(f'Convert {checkpoint_fp} to {wfp} done.')
return wfp
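# --- Hedged usage sketch, not part of the original utility ---
# The architecture name and checkpoint path below are placeholders only; the
# real values come from the 3DDFA_V2 configs, not from this file.
if __name__ == '__main__':
    onnx_fp = convert_to_onnx(
        arch='mobilenet',                     # hypothetical arch name in models_3ddfa
        checkpoint_fp='weights/example.pth',  # hypothetical checkpoint path
        num_params=62,
        widen_factor=1,
        size=120,
        mode='small',
    )
    print(onnx_fp)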
| 22.95122
| 56
| 0.618491
|
b6bbb44dba9cdff28a292e7ffb3d08af2c42cb12
| 4,028
|
py
|
Python
|
pytest_testdox/plugin.py
|
jairhenrique/pytest-testdox
|
ea8796fe09cfb10b7b00786453c690d4630f9076
|
[
"MIT"
] | 42
|
2016-11-04T15:15:22.000Z
|
2022-03-06T11:02:34.000Z
|
pytest_testdox/plugin.py
|
renanivo/pytest-testdox
|
86f8224b416a74ccc5b102b49c24a918c42d87d4
|
[
"MIT"
] | 68
|
2017-03-08T16:15:23.000Z
|
2022-03-10T18:35:48.000Z
|
pytest_testdox/plugin.py
|
jairhenrique/pytest-testdox
|
ea8796fe09cfb10b7b00786453c690d4630f9076
|
[
"MIT"
] | 9
|
2018-01-21T06:21:53.000Z
|
2022-03-06T11:16:27.000Z
|
import sys
import pytest
from _pytest.terminal import TerminalReporter
from . import constants, models, wrappers
def pytest_addoption(parser):
group = parser.getgroup('terminal reporting', 'reporting', after='general')
group.addoption(
'--testdox', action='store_true', dest='testdox', default=False,
help='Report test progress in testdox format'
)
group.addoption(
'--force-testdox', action='store_true',
dest='force_testdox', default=False,
help='Force testdox output even when not in real terminal'
)
parser.addini(
'testdox_format',
help='TestDox report format (plaintext|utf8)',
default='utf8'
)
def should_enable_plugin(config):
return (
(config.option.testdox and sys.stdout.isatty())
or config.option.force_testdox
)
@pytest.mark.trylast
def pytest_configure(config):
config.addinivalue_line(
"markers",
"{}(title): Override testdox report test title".format(
constants.TITLE_MARK
)
)
config.addinivalue_line(
"markers",
"{}(title): Override testdox report class title".format(
constants.CLASS_NAME_MARK
)
)
if should_enable_plugin(config):
# Get the standard terminal reporter plugin and replace it with ours
standard_reporter = config.pluginmanager.getplugin('terminalreporter')
testdox_reporter = TestdoxTerminalReporter(standard_reporter.config)
config.pluginmanager.unregister(standard_reporter)
config.pluginmanager.register(testdox_reporter, 'terminalreporter')
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
result = yield
report = result.get_result()
testdox_title = _first(
mark.args[0]
for mark in item.iter_markers(name=constants.TITLE_MARK)
)
testdox_class_name = _first(
mark.args[0]
for mark in item.iter_markers(name=constants.CLASS_NAME_MARK)
)
if testdox_title:
report.testdox_title = testdox_title
if testdox_class_name:
report.testdox_class_name = testdox_class_name
class TestdoxTerminalReporter(TerminalReporter):
def __init__(self, config, file=None):
super().__init__(config, file)
self._last_header_id = None
self.pattern_config = models.PatternConfig(
files=self.config.getini('python_files'),
functions=self.config.getini('python_functions'),
classes=self.config.getini('python_classes')
)
self.result_wrappers = []
if config.getini('testdox_format') != 'plaintext':
self.result_wrappers.append(wrappers.UTF8Wrapper)
if config.option.color != 'no':
self.result_wrappers.append(wrappers.ColorWrapper)
def _register_stats(self, report):
"""
This method is not created for this plugin, but it is needed in order
to the reporter display the tests summary at the end.
Originally from:
https://github.com/pytest-dev/pytest/blob/47a2a77/_pytest/terminal.py#L198-L201
"""
res = self.config.hook.pytest_report_teststatus(
report=report,
config=self.config
)
category = res[0]
self.stats.setdefault(category, []).append(report)
self._tests_ran = True
def pytest_runtest_logreport(self, report):
self._register_stats(report)
if report.when != 'call' and not report.skipped:
return
result = models.Result.create(report, self.pattern_config)
for wrapper in self.result_wrappers:
result = wrapper(result)
if result.header_id != self._last_header_id:
self._last_header_id = result.header_id
self._tw.sep(' ')
self._tw.line(result.header)
self._tw.line(str(result))
def _first(iterator):
try:
return next(iterator)
except StopIteration:
return None
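# --- Hedged usage sketch, not part of the plugin ---
# How a test module might override the reported titles, assuming the marker
# names behind constants.TITLE_MARK / constants.CLASS_NAME_MARK are "it" and
# "describe" (as documented for pytest-testdox):
#
#     import pytest
#
#     @pytest.mark.describe("the payment gateway")
#     class TestGateway:
#         @pytest.mark.it("rejects an expired card")
#         def test_expired_card(self):
#             assert True
#
# Running `pytest --testdox` (or `--force-testdox` when stdout is not a TTY)
# then routes the report through TestdoxTerminalReporter above.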
| 29.617647
| 87
| 0.654916
|
4431a1acd0b96e5f1fc7dc426581547e026f4a75
| 512
|
py
|
Python
|
xinshuo_math/test/bbox_transform/test_get_center_crop_bbox.py
|
xinshuoweng/cv_ml_tool
|
1918b9e37ec5fb8148b8a089f226a4864d67b153
|
[
"MIT"
] | 31
|
2020-03-05T12:27:21.000Z
|
2022-03-07T04:00:18.000Z
|
xinshuo_math/test/bbox_transform/test_get_center_crop_bbox.py
|
xinshuoweng/cv_ml_tool
|
1918b9e37ec5fb8148b8a089f226a4864d67b153
|
[
"MIT"
] | null | null | null |
xinshuo_math/test/bbox_transform/test_get_center_crop_bbox.py
|
xinshuoweng/cv_ml_tool
|
1918b9e37ec5fb8148b8a089f226a4864d67b153
|
[
"MIT"
] | 12
|
2020-07-06T05:06:58.000Z
|
2021-11-18T14:43:20.000Z
|
# Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import numpy as np
import init_paths
from bbox_transform import get_center_crop_bbox
from xinshuo_miscellaneous import CHECK_EQ_NUMPY
def test_get_center_crop_bbox():
print('check basic')
bbox = [1, 1, 10, 10]
crop_bbox = get_center_crop_bbox(bbox)
print(bbox)
print(crop_bbox)
assert CHECK_EQ_NUMPY(crop_bbox, np.array([-4, -4, 10, 10]).reshape((1, 4)))
print('\n\nDONE! SUCCESSFUL!!\n')
if __name__ == '__main__':
test_get_center_crop_bbox()
| 25.6
| 77
| 0.753906
|
8a6def3be0ad48b611e249a26b1b1a7e72752600
| 30
|
py
|
Python
|
src/bones-ipykernel/bones/kernel/__init__.py
|
DangerMouseB/bones
|
e485611f092a3a85ed4620f13a2460f695cfc9ef
|
[
"BSD-3-Clause"
] | null | null | null |
src/bones-ipykernel/bones/kernel/__init__.py
|
DangerMouseB/bones
|
e485611f092a3a85ed4620f13a2460f695cfc9ef
|
[
"BSD-3-Clause"
] | null | null | null |
src/bones-ipykernel/bones/kernel/__init__.py
|
DangerMouseB/bones
|
e485611f092a3a85ed4620f13a2460f695cfc9ef
|
[
"BSD-3-Clause"
] | null | null | null |
from ._core import MultiKernel
| 30
| 30
| 0.866667
|
77bedc39a6594ad9afa067f83555317589e61400
| 1,414
|
py
|
Python
|
logindefender_tb.py
|
corerd/PyDomo
|
3a576e047f52b9e80bd79641694fae6b91724a22
|
[
"MIT"
] | null | null | null |
logindefender_tb.py
|
corerd/PyDomo
|
3a576e047f52b9e80bd79641694fae6b91724a22
|
[
"MIT"
] | null | null | null |
logindefender_tb.py
|
corerd/PyDomo
|
3a576e047f52b9e80bd79641694fae6b91724a22
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Append to PYTHONPATH the path of the script from which it runs.
Ref. http://stackoverflow.com/a/7886092
'''
from web.logindefender import test_bench
def run():
'''Returns status code
'''
return test_bench()
if __name__ == "__main__":
exit(run())
| 35.35
| 80
| 0.75389
|
f0f4a924976bb11c65ec3c47f939f3b42a71e01c
| 36,715
|
py
|
Python
|
tests/integ/test_workflow.py
|
sayonkumarsaha/sagemaker-python-sdk
|
f399fb741984f4577ee70977a6ae5047e198a543
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/test_workflow.py
|
sayonkumarsaha/sagemaker-python-sdk
|
f399fb741984f4577ee70977a6ae5047e198a543
|
[
"Apache-2.0"
] | null | null | null |
tests/integ/test_workflow.py
|
sayonkumarsaha/sagemaker-python-sdk
|
f399fb741984f4577ee70977a6ae5047e198a543
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import os
import re
import subprocess
import time
import uuid
import pytest
from botocore.exceptions import WaiterError
from sagemaker.debugger import (
DebuggerHookConfig,
Rule,
rule_configs,
)
from datetime import datetime
from sagemaker.inputs import CreateModelInput, TrainingInput
from sagemaker.model import Model
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.pytorch.estimator import PyTorch
from sagemaker.s3 import S3Uploader
from sagemaker.session import get_execution_role
from sagemaker.sklearn.estimator import SKLearn
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.spark.processing import PySparkProcessor, SparkJarProcessor
from sagemaker.workflow.conditions import ConditionGreaterThanOrEqualTo
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.dataset_definition.inputs import DatasetDefinition, AthenaDatasetDefinition
from sagemaker.workflow.execution_variables import ExecutionVariables
from sagemaker.workflow.functions import Join
from sagemaker.workflow.parameters import (
ParameterInteger,
ParameterString,
)
from sagemaker.workflow.steps import (
CreateModelStep,
ProcessingStep,
TrainingStep,
CacheConfig,
)
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.pipeline import Pipeline
from tests.integ import DATA_DIR
def ordered(obj):
"""Helper function for dict comparison"""
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
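# Hedged illustration, not in the original file: ordered() makes JSON-like
# structures comparable regardless of key or list order, e.g.
# ordered({"b": [2, 1], "a": 0}) == ordered({"a": 0, "b": [1, 2]})  -> True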
@pytest.fixture(scope="module")
def region_name(sagemaker_session):
return sagemaker_session.boto_session.region_name
@pytest.fixture(scope="module")
def role(sagemaker_session):
return get_execution_role(sagemaker_session)
@pytest.fixture(scope="module")
def script_dir():
return os.path.join(DATA_DIR, "sklearn_processing")
@pytest.fixture
def pipeline_name():
return f"my-pipeline-{int(time.time() * 10**7)}"
@pytest.fixture
def athena_dataset_definition(sagemaker_session):
return DatasetDefinition(
local_path="/opt/ml/processing/input/add",
data_distribution_type="FullyReplicated",
input_mode="File",
athena_dataset_definition=AthenaDatasetDefinition(
catalog="AwsDataCatalog",
database="default",
work_group="workgroup",
query_string='SELECT * FROM "default"."s3_test_table_$STAGE_$REGIONUNDERSCORED";',
output_s3_uri=f"s3://{sagemaker_session.default_bucket()}/add",
output_format="JSON",
output_compression="GZIP",
),
)
@pytest.fixture
def configuration() -> list:
configuration = [
{
"Classification": "spark-defaults",
"Properties": {"spark.executor.memory": "2g", "spark.executor.cores": "1"},
},
{
"Classification": "hadoop-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {
"HADOOP_DATANODE_HEAPSIZE": "2048",
"HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",
},
"Configurations": [],
}
],
},
{
"Classification": "core-site",
"Properties": {"spark.executor.memory": "2g", "spark.executor.cores": "1"},
},
{"Classification": "hadoop-log4j", "Properties": {"key": "value"}},
{
"Classification": "hive-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {
"HADOOP_DATANODE_HEAPSIZE": "2048",
"HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",
},
"Configurations": [],
}
],
},
{"Classification": "hive-log4j", "Properties": {"key": "value"}},
{"Classification": "hive-exec-log4j", "Properties": {"key": "value"}},
{"Classification": "hive-site", "Properties": {"key": "value"}},
{"Classification": "spark-defaults", "Properties": {"key": "value"}},
{
"Classification": "spark-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {
"HADOOP_DATANODE_HEAPSIZE": "2048",
"HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",
},
"Configurations": [],
}
],
},
{"Classification": "spark-log4j", "Properties": {"key": "value"}},
{"Classification": "spark-hive-site", "Properties": {"key": "value"}},
{"Classification": "spark-metrics", "Properties": {"key": "value"}},
{"Classification": "yarn-site", "Properties": {"key": "value"}},
{
"Classification": "yarn-env",
"Properties": {},
"Configurations": [
{
"Classification": "export",
"Properties": {
"HADOOP_DATANODE_HEAPSIZE": "2048",
"HADOOP_NAMENODE_OPTS": "-XX:GCTimeRatio=19",
},
"Configurations": [],
}
],
},
]
return configuration
@pytest.fixture(scope="module")
def build_jar():
spark_path = os.path.join(DATA_DIR, "spark")
java_file_path = os.path.join("com", "amazonaws", "sagemaker", "spark", "test")
java_version_pattern = r"(\d+\.\d+).*"
jar_file_path = os.path.join(spark_path, "code", "java", "hello-java-spark")
# compile java file
java_version = subprocess.check_output(["java", "-version"], stderr=subprocess.STDOUT).decode(
"utf-8"
)
java_version = re.search(java_version_pattern, java_version).groups()[0]
if float(java_version) > 1.8:
subprocess.run(
[
"javac",
"--release",
"8",
os.path.join(jar_file_path, java_file_path, "HelloJavaSparkApp.java"),
]
)
else:
subprocess.run(
["javac", os.path.join(jar_file_path, java_file_path, "HelloJavaSparkApp.java")]
)
subprocess.run(
[
"jar",
"cfm",
os.path.join(jar_file_path, "hello-spark-java.jar"),
os.path.join(jar_file_path, "manifest.txt"),
"-C",
jar_file_path,
".",
]
)
yield
subprocess.run(["rm", os.path.join(jar_file_path, "hello-spark-java.jar")])
subprocess.run(["rm", os.path.join(jar_file_path, java_file_path, "HelloJavaSparkApp.class")])
def test_three_step_definition(
sagemaker_session,
region_name,
role,
script_dir,
pipeline_name,
athena_dataset_definition,
):
framework_version = "0.20.0"
instance_type = ParameterString(name="InstanceType", default_value="ml.m5.xlarge")
instance_count = ParameterInteger(name="InstanceCount", default_value=1)
output_prefix = ParameterString(name="OutputPrefix", default_value="output")
input_data = f"s3://sagemaker-sample-data-{region_name}/processing/census/census-income.csv"
sklearn_processor = SKLearnProcessor(
framework_version=framework_version,
instance_type=instance_type,
instance_count=instance_count,
base_job_name="test-sklearn",
sagemaker_session=sagemaker_session,
role=role,
)
step_process = ProcessingStep(
name="my-process",
processor=sklearn_processor,
inputs=[
ProcessingInput(source=input_data, destination="/opt/ml/processing/input"),
ProcessingInput(dataset_definition=athena_dataset_definition),
],
outputs=[
ProcessingOutput(output_name="train_data", source="/opt/ml/processing/train"),
ProcessingOutput(
output_name="test_data",
source="/opt/ml/processing/test",
destination=Join(
on="/",
values=[
"s3:/",
sagemaker_session.default_bucket(),
"test-sklearn",
output_prefix,
ExecutionVariables.PIPELINE_EXECUTION_ID,
],
),
),
],
code=os.path.join(script_dir, "preprocessing.py"),
)
sklearn_train = SKLearn(
framework_version=framework_version,
entry_point=os.path.join(script_dir, "train.py"),
instance_type=instance_type,
sagemaker_session=sagemaker_session,
role=role,
)
step_train = TrainingStep(
name="my-train",
estimator=sklearn_train,
inputs=TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train_data"
].S3Output.S3Uri
),
)
model = Model(
image_uri=sklearn_train.image_uri,
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=role,
)
model_inputs = CreateModelInput(
instance_type="ml.m5.large",
accelerator_type="ml.eia1.medium",
)
step_model = CreateModelStep(
name="my-model",
model=model,
inputs=model_inputs,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_type, instance_count, output_prefix],
steps=[step_process, step_train, step_model],
sagemaker_session=sagemaker_session,
)
definition = json.loads(pipeline.definition())
assert definition["Version"] == "2020-12-01"
assert set(tuple(param.items()) for param in definition["Parameters"]) == set(
[
tuple(
{"Name": "InstanceType", "Type": "String", "DefaultValue": "ml.m5.xlarge"}.items()
),
tuple({"Name": "InstanceCount", "Type": "Integer", "DefaultValue": 1}.items()),
tuple({"Name": "OutputPrefix", "Type": "String", "DefaultValue": "output"}.items()),
]
)
steps = definition["Steps"]
assert len(steps) == 3
names_and_types = []
processing_args = {}
training_args = {}
for step in steps:
names_and_types.append((step["Name"], step["Type"]))
if step["Type"] == "Processing":
processing_args = step["Arguments"]
if step["Type"] == "Training":
training_args = step["Arguments"]
if step["Type"] == "Model":
model_args = step["Arguments"]
assert set(names_and_types) == set(
[
("my-process", "Processing"),
("my-train", "Training"),
("my-model", "Model"),
]
)
assert processing_args["ProcessingResources"]["ClusterConfig"] == {
"InstanceType": {"Get": "Parameters.InstanceType"},
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"VolumeSizeInGB": 30,
}
assert training_args["ResourceConfig"] == {
"InstanceCount": 1,
"InstanceType": {"Get": "Parameters.InstanceType"},
"VolumeSizeInGB": 30,
}
assert training_args["InputDataConfig"][0]["DataSource"]["S3DataSource"]["S3Uri"] == {
"Get": "Steps.my-process.ProcessingOutputConfig.Outputs['train_data'].S3Output.S3Uri"
}
assert model_args["PrimaryContainer"]["ModelDataUrl"] == {
"Get": "Steps.my-train.ModelArtifacts.S3ModelArtifacts"
}
try:
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
create_arn,
)
finally:
try:
pipeline.delete()
except Exception:
pass
def test_one_step_sklearn_processing_pipeline(
sagemaker_session,
role,
sklearn_latest_version,
cpu_instance_type,
pipeline_name,
region_name,
athena_dataset_definition,
):
instance_count = ParameterInteger(name="InstanceCount", default_value=2)
script_path = os.path.join(DATA_DIR, "dummy_script.py")
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
inputs = [
ProcessingInput(source=input_file_path, destination="/opt/ml/processing/inputs/"),
ProcessingInput(dataset_definition=athena_dataset_definition),
]
cache_config = CacheConfig(enable_caching=True, expire_after="T30m")
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=role,
instance_type=cpu_instance_type,
instance_count=instance_count,
command=["python3"],
sagemaker_session=sagemaker_session,
base_job_name="test-sklearn",
)
step_sklearn = ProcessingStep(
name="sklearn-process",
processor=sklearn_processor,
inputs=inputs,
code=script_path,
cache_config=cache_config,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_count],
steps=[step_sklearn],
sagemaker_session=sagemaker_session,
)
try:
        # NOTE: We should exercise the case where the role used for the pipeline execution is
        # different from the role required by the steps in the pipeline itself. The role in
        # the pipeline definition needs to create training and processing jobs and other
        # SageMaker entities. However, the jobs created in the steps themselves execute
        # under a potentially different role, often requiring access to S3 and other
        # artifacts not required during creation of the jobs in the pipeline steps.
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
create_arn,
)
pipeline.parameters = [ParameterInteger(name="InstanceCount", default_value=1)]
response = pipeline.update(role)
update_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
update_arn,
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
response = execution.describe()
assert response["PipelineArn"] == create_arn
# Check CacheConfig
response = json.loads(pipeline.describe()["PipelineDefinition"])["Steps"][0]["CacheConfig"]
assert response["Enabled"] == cache_config.enable_caching
assert response["ExpireAfter"] == cache_config.expire_after
try:
execution.wait(delay=30, max_attempts=3)
except WaiterError:
pass
execution_steps = execution.list_steps()
assert len(execution_steps) == 1
assert execution_steps[0]["StepName"] == "sklearn-process"
finally:
try:
pipeline.delete()
except Exception:
pass
def test_one_step_pyspark_processing_pipeline(
sagemaker_session,
role,
cpu_instance_type,
pipeline_name,
region_name,
):
instance_count = ParameterInteger(name="InstanceCount", default_value=2)
script_path = os.path.join(DATA_DIR, "dummy_script.py")
cache_config = CacheConfig(enable_caching=True, expire_after="T30m")
pyspark_processor = PySparkProcessor(
base_job_name="sm-spark",
framework_version="2.4",
role=role,
instance_count=instance_count,
instance_type=cpu_instance_type,
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
spark_run_args = pyspark_processor.get_run_args(
submit_app=script_path,
arguments=[
"--s3_input_bucket",
sagemaker_session.default_bucket(),
"--s3_input_key_prefix",
"spark-input",
"--s3_output_bucket",
sagemaker_session.default_bucket(),
"--s3_output_key_prefix",
"spark-output",
],
)
step_pyspark = ProcessingStep(
name="pyspark-process",
processor=pyspark_processor,
inputs=spark_run_args.inputs,
outputs=spark_run_args.outputs,
job_arguments=spark_run_args.arguments,
code=spark_run_args.code,
cache_config=cache_config,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_count],
steps=[step_pyspark],
sagemaker_session=sagemaker_session,
)
try:
        # NOTE: We should exercise the case where the role used for the pipeline execution is
        # different from the role required by the steps in the pipeline itself. The role in
        # the pipeline definition needs to create training and processing jobs and other
        # SageMaker entities. However, the jobs created in the steps themselves execute
        # under a potentially different role, often requiring access to S3 and other
        # artifacts not required during creation of the jobs in the pipeline steps.
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
create_arn,
)
pipeline.parameters = [ParameterInteger(name="InstanceCount", default_value=1)]
response = pipeline.update(role)
update_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
update_arn,
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
response = execution.describe()
assert response["PipelineArn"] == create_arn
# Check CacheConfig
response = json.loads(pipeline.describe()["PipelineDefinition"])["Steps"][0]["CacheConfig"]
assert response["Enabled"] == cache_config.enable_caching
assert response["ExpireAfter"] == cache_config.expire_after
try:
execution.wait(delay=30, max_attempts=3)
except WaiterError:
pass
execution_steps = execution.list_steps()
assert len(execution_steps) == 1
assert execution_steps[0]["StepName"] == "pyspark-process"
finally:
try:
pipeline.delete()
except Exception:
pass
def test_one_step_sparkjar_processing_pipeline(
sagemaker_session, role, cpu_instance_type, pipeline_name, region_name, configuration, build_jar
):
instance_count = ParameterInteger(name="InstanceCount", default_value=2)
cache_config = CacheConfig(enable_caching=True, expire_after="T30m")
spark_path = os.path.join(DATA_DIR, "spark")
spark_jar_processor = SparkJarProcessor(
role=role,
instance_count=2,
instance_type=cpu_instance_type,
sagemaker_session=sagemaker_session,
framework_version="2.4",
)
bucket = spark_jar_processor.sagemaker_session.default_bucket()
with open(os.path.join(spark_path, "files", "data.jsonl")) as data:
body = data.read()
input_data_uri = f"s3://{bucket}/spark/input/data.jsonl"
S3Uploader.upload_string_as_file_body(
body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session
)
output_data_uri = f"s3://{bucket}/spark/output/sales/{datetime.now().isoformat()}"
java_project_dir = os.path.join(spark_path, "code", "java", "hello-java-spark")
spark_run_args = spark_jar_processor.get_run_args(
submit_app=f"{java_project_dir}/hello-spark-java.jar",
submit_class="com.amazonaws.sagemaker.spark.test.HelloJavaSparkApp",
arguments=["--input", input_data_uri, "--output", output_data_uri],
configuration=configuration,
)
step_pyspark = ProcessingStep(
name="sparkjar-process",
processor=spark_jar_processor,
inputs=spark_run_args.inputs,
outputs=spark_run_args.outputs,
job_arguments=spark_run_args.arguments,
code=spark_run_args.code,
cache_config=cache_config,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_count],
steps=[step_pyspark],
sagemaker_session=sagemaker_session,
)
try:
        # NOTE: We should exercise the case where the role used for the pipeline execution is
        # different from the role required by the steps in the pipeline itself. The role in
        # the pipeline definition needs to create training and processing jobs and other
        # SageMaker entities. However, the jobs created in the steps themselves execute
        # under a potentially different role, often requiring access to S3 and other
        # artifacts not required during creation of the jobs in the pipeline steps.
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
create_arn,
)
pipeline.parameters = [ParameterInteger(name="InstanceCount", default_value=1)]
response = pipeline.update(role)
update_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
update_arn,
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
response = execution.describe()
assert response["PipelineArn"] == create_arn
# Check CacheConfig
response = json.loads(pipeline.describe()["PipelineDefinition"])["Steps"][0]["CacheConfig"]
assert response["Enabled"] == cache_config.enable_caching
assert response["ExpireAfter"] == cache_config.expire_after
try:
execution.wait(delay=30, max_attempts=3)
except WaiterError:
pass
execution_steps = execution.list_steps()
assert len(execution_steps) == 1
assert execution_steps[0]["StepName"] == "sparkjar-process"
finally:
try:
pipeline.delete()
except Exception:
pass
def test_conditional_pytorch_training_model_registration(
sagemaker_session,
role,
cpu_instance_type,
pipeline_name,
region_name,
):
base_dir = os.path.join(DATA_DIR, "pytorch_mnist")
entry_point = os.path.join(base_dir, "mnist.py")
input_path = sagemaker_session.upload_data(
path=os.path.join(base_dir, "training"),
key_prefix="integ-test-data/pytorch_mnist/training",
)
inputs = TrainingInput(s3_data=input_path)
instance_count = ParameterInteger(name="InstanceCount", default_value=1)
instance_type = ParameterString(name="InstanceType", default_value="ml.m5.xlarge")
good_enough_input = ParameterInteger(name="GoodEnoughInput", default_value=1)
pytorch_estimator = PyTorch(
entry_point=entry_point,
role=role,
framework_version="1.5.0",
py_version="py3",
instance_count=instance_count,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
)
step_train = TrainingStep(
name="pytorch-train",
estimator=pytorch_estimator,
inputs=inputs,
)
step_register = RegisterModel(
name="pytorch-register-model",
estimator=pytorch_estimator,
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
content_types=["*"],
response_types=["*"],
inference_instances=["*"],
transform_instances=["*"],
description="test-description",
)
model = Model(
image_uri=pytorch_estimator.training_image_uri(),
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=role,
)
model_inputs = CreateModelInput(
instance_type="ml.m5.large",
accelerator_type="ml.eia1.medium",
)
step_model = CreateModelStep(
name="pytorch-model",
model=model,
inputs=model_inputs,
)
step_cond = ConditionStep(
name="cond-good-enough",
conditions=[ConditionGreaterThanOrEqualTo(left=good_enough_input, right=1)],
if_steps=[step_train, step_register],
else_steps=[step_model],
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[good_enough_input, instance_count, instance_type],
steps=[step_cond],
sagemaker_session=sagemaker_session,
)
try:
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}", create_arn
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
execution = pipeline.start(parameters={"GoodEnoughInput": 0})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
finally:
try:
pipeline.delete()
except Exception:
pass
def test_model_registration_with_model_repack(
sagemaker_session,
role,
pipeline_name,
region_name,
):
base_dir = os.path.join(DATA_DIR, "pytorch_mnist")
entry_point = os.path.join(base_dir, "mnist.py")
input_path = sagemaker_session.upload_data(
path=os.path.join(base_dir, "training"),
key_prefix="integ-test-data/pytorch_mnist/training",
)
inputs = TrainingInput(s3_data=input_path)
instance_count = ParameterInteger(name="InstanceCount", default_value=1)
instance_type = ParameterString(name="InstanceType", default_value="ml.m5.xlarge")
good_enough_input = ParameterInteger(name="GoodEnoughInput", default_value=1)
pytorch_estimator = PyTorch(
entry_point=entry_point,
role=role,
framework_version="1.5.0",
py_version="py3",
instance_count=instance_count,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
)
step_train = TrainingStep(
name="pytorch-train",
estimator=pytorch_estimator,
inputs=inputs,
)
step_register = RegisterModel(
name="pytorch-register-model",
estimator=pytorch_estimator,
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
content_types=["*"],
response_types=["*"],
inference_instances=["*"],
transform_instances=["*"],
description="test-description",
entry_point=entry_point,
)
model = Model(
image_uri=pytorch_estimator.training_image_uri(),
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=role,
)
model_inputs = CreateModelInput(
instance_type="ml.m5.large",
accelerator_type="ml.eia1.medium",
)
step_model = CreateModelStep(
name="pytorch-model",
model=model,
inputs=model_inputs,
)
step_cond = ConditionStep(
name="cond-good-enough",
conditions=[ConditionGreaterThanOrEqualTo(left=good_enough_input, right=1)],
if_steps=[step_train, step_register],
else_steps=[step_model],
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[good_enough_input, instance_count, instance_type],
steps=[step_cond],
sagemaker_session=sagemaker_session,
)
try:
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}", create_arn
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
execution = pipeline.start(parameters={"GoodEnoughInput": 0})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
finally:
try:
pipeline.delete()
except Exception:
pass
def test_training_job_with_debugger_and_profiler(
sagemaker_session,
pipeline_name,
role,
pytorch_training_latest_version,
pytorch_training_latest_py_version,
):
instance_count = ParameterInteger(name="InstanceCount", default_value=1)
instance_type = ParameterString(name="InstanceType", default_value="ml.m5.xlarge")
rules = [
Rule.sagemaker(rule_configs.vanishing_gradient()),
Rule.sagemaker(base_config=rule_configs.all_zero(), rule_parameters={"tensor_regex": ".*"}),
Rule.sagemaker(rule_configs.loss_not_decreasing()),
]
debugger_hook_config = DebuggerHookConfig(
s3_output_path=f"s3://{sagemaker_session.default_bucket()}/{uuid.uuid4()}/tensors"
)
base_dir = os.path.join(DATA_DIR, "pytorch_mnist")
script_path = os.path.join(base_dir, "mnist.py")
input_path = sagemaker_session.upload_data(
path=os.path.join(base_dir, "training"),
key_prefix="integ-test-data/pytorch_mnist/training",
)
inputs = TrainingInput(s3_data=input_path)
pytorch_estimator = PyTorch(
entry_point=script_path,
role="SageMakerRole",
framework_version=pytorch_training_latest_version,
py_version=pytorch_training_latest_py_version,
instance_count=instance_count,
instance_type=instance_type,
sagemaker_session=sagemaker_session,
rules=rules,
debugger_hook_config=debugger_hook_config,
)
step_train = TrainingStep(
name="pytorch-train",
estimator=pytorch_estimator,
inputs=inputs,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_count, instance_type],
steps=[step_train],
sagemaker_session=sagemaker_session,
)
try:
response = pipeline.create(role)
create_arn = response["PipelineArn"]
execution = pipeline.start()
response = execution.describe()
assert response["PipelineArn"] == create_arn
try:
execution.wait(delay=10, max_attempts=60)
except WaiterError:
pass
execution_steps = execution.list_steps()
assert len(execution_steps) == 1
assert execution_steps[0].get("FailureReason", "") == ""
assert execution_steps[0]["StepName"] == "pytorch-train"
assert execution_steps[0]["StepStatus"] == "Succeeded"
training_job_arn = execution_steps[0]["Metadata"]["TrainingJob"]["Arn"]
job_description = sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=training_job_arn.split("/")[1]
)
for index, rule in enumerate(rules):
config = job_description["DebugRuleConfigurations"][index]
assert config["RuleConfigurationName"] == rule.name
assert config["RuleEvaluatorImage"] == rule.image_uri
assert config["VolumeSizeInGB"] == 0
assert (
config["RuleParameters"]["rule_to_invoke"] == rule.rule_parameters["rule_to_invoke"]
)
assert job_description["DebugHookConfig"] == debugger_hook_config._to_request_dict()
assert job_description["ProfilingStatus"] == "Enabled"
assert job_description["ProfilerConfig"]["ProfilingIntervalInMilliseconds"] == 500
finally:
try:
pipeline.delete()
except Exception:
pass
def test_two_processing_job_depends_on(
sagemaker_session,
role,
pipeline_name,
region_name,
cpu_instance_type,
):
instance_count = ParameterInteger(name="InstanceCount", default_value=2)
script_path = os.path.join(DATA_DIR, "dummy_script.py")
pyspark_processor = PySparkProcessor(
base_job_name="sm-spark",
framework_version="2.4",
role=role,
instance_count=instance_count,
instance_type=cpu_instance_type,
max_runtime_in_seconds=1200,
sagemaker_session=sagemaker_session,
)
spark_run_args = pyspark_processor.get_run_args(
submit_app=script_path,
arguments=[
"--s3_input_bucket",
sagemaker_session.default_bucket(),
"--s3_input_key_prefix",
"spark-input",
"--s3_output_bucket",
sagemaker_session.default_bucket(),
"--s3_output_key_prefix",
"spark-output",
],
)
step_pyspark_1 = ProcessingStep(
name="pyspark-process-1",
processor=pyspark_processor,
inputs=spark_run_args.inputs,
outputs=spark_run_args.outputs,
job_arguments=spark_run_args.arguments,
code=spark_run_args.code,
)
step_pyspark_2 = ProcessingStep(
name="pyspark-process-2",
depends_on=[step_pyspark_1.name],
processor=pyspark_processor,
inputs=spark_run_args.inputs,
outputs=spark_run_args.outputs,
job_arguments=spark_run_args.arguments,
code=spark_run_args.code,
)
pipeline = Pipeline(
name=pipeline_name,
parameters=[instance_count],
steps=[step_pyspark_1, step_pyspark_2],
sagemaker_session=sagemaker_session,
)
try:
response = pipeline.create(role)
create_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
create_arn,
)
pipeline.parameters = [ParameterInteger(name="InstanceCount", default_value=1)]
response = pipeline.update(role)
update_arn = response["PipelineArn"]
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
update_arn,
)
execution = pipeline.start(parameters={})
assert re.match(
fr"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
execution.arn,
)
response = execution.describe()
assert response["PipelineArn"] == create_arn
try:
execution.wait(delay=60)
except WaiterError:
pass
execution_steps = execution.list_steps()
assert len(execution_steps) == 2
time_stamp = {}
for execution_step in execution_steps:
name = execution_step["StepName"]
if name == "pyspark-process-1":
time_stamp[name] = execution_step["EndTime"]
else:
time_stamp[name] = execution_step["StartTime"]
assert time_stamp["pyspark-process-1"] < time_stamp["pyspark-process-2"]
finally:
try:
pipeline.delete()
except Exception:
pass
| 34.026877
| 100
| 0.629334
|
357d0ca31018948771c4c744f6e616edfa4f83b8
| 25,763
|
py
|
Python
|
sdk/python/pulumi_gcp/bigquery/iam_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/bigquery/iam_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/bigquery/iam_policy.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IamPolicyArgs', 'IamPolicy']
@pulumi.input_type
class IamPolicyArgs:
def __init__(__self__, *,
dataset_id: pulumi.Input[str],
policy_data: pulumi.Input[str],
table_id: pulumi.Input[str],
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a IamPolicy resource.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
pulumi.set(__self__, "dataset_id", dataset_id)
pulumi.set(__self__, "policy_data", policy_data)
pulumi.set(__self__, "table_id", table_id)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "dataset_id")
@dataset_id.setter
def dataset_id(self, value: pulumi.Input[str]):
pulumi.set(self, "dataset_id", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Input[str]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_data", value)
@property
@pulumi.getter(name="tableId")
def table_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "table_id")
@table_id.setter
def table_id(self, value: pulumi.Input[str]):
pulumi.set(self, "table_id", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _IamPolicyState:
def __init__(__self__, *,
dataset_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
table_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IamPolicy resources.
:param pulumi.Input[str] etag: (Computed) The etag of the IAM policy.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
if dataset_id is not None:
pulumi.set(__self__, "dataset_id", dataset_id)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if policy_data is not None:
pulumi.set(__self__, "policy_data", policy_data)
if project is not None:
pulumi.set(__self__, "project", project)
if table_id is not None:
pulumi.set(__self__, "table_id", table_id)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "dataset_id")
@dataset_id.setter
def dataset_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_id", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> Optional[pulumi.Input[str]]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_data", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="tableId")
def table_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "table_id")
@table_id.setter
def table_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "table_id", value)
class IamPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
table_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Three different resources help you manage your IAM policy for BigQuery Table. Each of these resources serves a different use case:
* `bigquery.IamPolicy`: Authoritative. Sets the IAM policy for the table and replaces any existing policy already attached.
* `bigquery.IamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the table are preserved.
* `bigquery.IamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the table are preserved.
> **Note:** `bigquery.IamPolicy` **cannot** be used in conjunction with `bigquery.IamBinding` and `bigquery.IamMember` or they will fight over what your policy should be.
> **Note:** `bigquery.IamBinding` resources **can be** used in conjunction with `bigquery.IamMember` resources **only if** they do not grant privilege to the same role.
## google\_bigquery\_table\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
)])
policy = gcp.bigquery.IamPolicy("policy",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
),
)])
policy = gcp.bigquery.IamPolicy("policy",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
policy_data=admin.policy_data)
```
## google\_bigquery\_table\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.bigquery.IamBinding("binding",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"])
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.bigquery.IamBinding("binding",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
condition=gcp.bigquery.IamBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## google\_bigquery\_table\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.bigquery.IamMember("member",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
member="user:jane@example.com")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.bigquery.IamMember("member",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
member="user:jane@example.com",
condition=gcp.bigquery.IamMemberConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## Import
        For all import syntaxes, the "resource in question" can take any of the following forms: * projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} * {{project}}/{{dataset_id}}/{{table_id}} * {{dataset_id}}/{{table_id}} * {{table_id}} Any variables not passed in the import command will be taken from the provider configuration. BigQuery table IAM resources can be imported using the resource identifiers, role, and member. IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} roles/bigquery.dataOwner user:jane@example.com"
```
        IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} roles/bigquery.dataOwner"
```
IAM policy imports use the identifier of the resource in question, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
```
        -> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IamPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Three different resources help you manage your IAM policy for BigQuery Table. Each of these resources serves a different use case:
* `bigquery.IamPolicy`: Authoritative. Sets the IAM policy for the table and replaces any existing policy already attached.
* `bigquery.IamBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the table are preserved.
* `bigquery.IamMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the table are preserved.
> **Note:** `bigquery.IamPolicy` **cannot** be used in conjunction with `bigquery.IamBinding` and `bigquery.IamMember` or they will fight over what your policy should be.
> **Note:** `bigquery.IamBinding` resources **can be** used in conjunction with `bigquery.IamMember` resources **only if** they do not grant privilege to the same role.
## google\_bigquery\_table\_iam\_policy
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
)])
policy = gcp.bigquery.IamPolicy("policy",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
),
)])
policy = gcp.bigquery.IamPolicy("policy",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
policy_data=admin.policy_data)
```
## google\_bigquery\_table\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.bigquery.IamBinding("binding",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"])
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
binding = gcp.bigquery.IamBinding("binding",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
members=["user:jane@example.com"],
condition=gcp.bigquery.IamBindingConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## google\_bigquery\_table\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.bigquery.IamMember("member",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
member="user:jane@example.com")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
member = gcp.bigquery.IamMember("member",
project=google_bigquery_table["test"]["project"],
dataset_id=google_bigquery_table["test"]["dataset_id"],
table_id=google_bigquery_table["test"]["table_id"],
role="roles/bigquery.dataOwner",
member="user:jane@example.com",
condition=gcp.bigquery.IamMemberConditionArgs(
title="expires_after_2019_12_31",
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
))
```
## Import
For all import syntaxes, the "resource in question" can take any of the following forms* projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} * {{project}}/{{dataset_id}}/{{table_id}} * {{dataset_id}}/{{table_id}} * {{table_id}} Any variables not passed in the import command will be taken from the provider configuration. BigQuery table IAM resources can be imported using the resource identifiers, role, and member. IAM member imports use space-delimited identifiersthe resource in question, the role, and the member identity, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} roles/bigquery.dataOwner user:jane@example.com"
```
IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor "projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}} roles/bigquery.dataOwner"
```
IAM policy imports use the identifier of the resource in question, e.g.
```sh
$ pulumi import gcp:bigquery/iamPolicy:IamPolicy editor projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}
```
-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param IamPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IamPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
table_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IamPolicyArgs.__new__(IamPolicyArgs)
if dataset_id is None and not opts.urn:
raise TypeError("Missing required property 'dataset_id'")
__props__.__dict__["dataset_id"] = dataset_id
if policy_data is None and not opts.urn:
raise TypeError("Missing required property 'policy_data'")
__props__.__dict__["policy_data"] = policy_data
__props__.__dict__["project"] = project
if table_id is None and not opts.urn:
raise TypeError("Missing required property 'table_id'")
__props__.__dict__["table_id"] = table_id
__props__.__dict__["etag"] = None
super(IamPolicy, __self__).__init__(
'gcp:bigquery/iamPolicy:IamPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
dataset_id: Optional[pulumi.Input[str]] = None,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
table_id: Optional[pulumi.Input[str]] = None) -> 'IamPolicy':
"""
Get an existing IamPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: (Computed) The etag of the IAM policy.
:param pulumi.Input[str] policy_data: The policy data generated by
a `organizations.get_iam_policy` data source.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IamPolicyState.__new__(_IamPolicyState)
__props__.__dict__["dataset_id"] = dataset_id
__props__.__dict__["etag"] = etag
__props__.__dict__["policy_data"] = policy_data
__props__.__dict__["project"] = project
__props__.__dict__["table_id"] = table_id
return IamPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="datasetId")
def dataset_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "dataset_id")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Output[str]:
"""
The policy data generated by
a `organizations.get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter(name="tableId")
def table_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "table_id")
| 44.572664
| 553
| 0.639561
|
074caa89d4d5900f911b903ac8673f9bbe14a98e
| 4,174
|
py
|
Python
|
tensorflow/reader.py
|
reachlin/machinelearning
|
eb8ba02aa0da86ccf9991fa609afa84d8c180a21
|
[
"MIT"
] | 11
|
2017-12-05T17:37:18.000Z
|
2020-07-01T21:47:31.000Z
|
tensorflow/reader.py
|
reachlin/machinelearning
|
eb8ba02aa0da86ccf9991fa609afa84d8c180a21
|
[
"MIT"
] | null | null | null |
tensorflow/reader.py
|
reachlin/machinelearning
|
eb8ba02aa0da86ccf9991fa609afa84d8c180a21
|
[
"MIT"
] | 6
|
2017-09-11T12:31:19.000Z
|
2020-12-13T16:28:48.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for parsing PTB text files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import sys
import tensorflow as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def ptb_raw_data(data_path=None):
"""Load PTB raw data from data directory "data_path".
Reads PTB text files, converts strings to integer ids,
and performs mini-batching of the inputs.
The PTB dataset comes from Tomas Mikolov's webpage:
http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
Args:
data_path: string path to the directory where simple-examples.tgz has
been extracted.
Returns:
tuple (train_data, valid_data, test_data, vocabulary)
where each of the data objects can be passed to PTBIterator.
"""
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
word_to_id = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
return train_data, valid_data, test_data, vocabulary
def ptb_producer(raw_data, batch_size, num_steps, name=None):
"""Iterate on the raw PTB data.
This chunks up raw_data into batches of examples and returns Tensors that
are drawn from these batches.
Args:
raw_data: one of the raw data outputs from ptb_raw_data.
batch_size: int, the batch size.
num_steps: int, the number of unrolls.
name: the name of this operation (optional).
Returns:
A pair of Tensors, each shaped [batch_size, num_steps]. The second element
of the tuple is the same data time-shifted to the right by one.
Raises:
tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
"""
with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
assertion = tf.assert_positive(
epoch_size,
message="epoch_size == 0, decrease batch_size or num_steps")
with tf.control_dependencies([assertion]):
epoch_size = tf.identity(epoch_size, name="epoch_size")
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = tf.strided_slice(data, [0, i * num_steps],
[batch_size, (i + 1) * num_steps])
x.set_shape([batch_size, num_steps])
y = tf.strided_slice(data, [0, i * num_steps + 1],
[batch_size, (i + 1) * num_steps + 1])
y.set_shape([batch_size, num_steps])
return x, y
| 33.934959
| 80
| 0.702444
|
dc8c76589e43ca749bbe0b37bec8ba3dd578321b
| 8,169
|
py
|
Python
|
tests/oss_fuzz/test_oss_fuzz_bug_report_parser.py
|
zhenyudg/monorail-scraper
|
154a3b809241599fc4bc5096ef1b019752e0de58
|
[
"MIT"
] | 1
|
2021-07-14T00:36:26.000Z
|
2021-07-14T00:36:26.000Z
|
tests/oss_fuzz/test_oss_fuzz_bug_report_parser.py
|
zhenyudg/monorail-scraper
|
154a3b809241599fc4bc5096ef1b019752e0de58
|
[
"MIT"
] | 1
|
2020-09-05T23:17:30.000Z
|
2020-09-05T23:42:05.000Z
|
tests/oss_fuzz/test_oss_fuzz_bug_report_parser.py
|
zhenyudg/monorail-scraper
|
154a3b809241599fc4bc5096ef1b019752e0de58
|
[
"MIT"
] | 1
|
2021-02-02T17:01:03.000Z
|
2021-02-02T17:01:03.000Z
|
from unittest import TestCase
from monorail_scraper.issue.issue_scraper import IssueScraper
from monorail_scraper.oss_fuzz.oss_fuzz_bug_report_parser import *
from monorail_scraper.oss_fuzz.oss_fuzz_bug_report_parser import _get_project, _get_fuzzing_engine, _get_fuzz_target_binary, \
_get_job_type, _get_platform_id, _get_crash_type, _get_crash_address, _get_crash_state, _get_sanitizer, \
_get_regressed_commits_url, _get_fixed_commits_url, _get_testcase_url
from tests.oss_fuzz.oss_fuzz_bug_reports import *
class TestIssueParser(TestCase):
def test_attach_oss_fuzz_bug_report(self):
# smoke test
scraper = IssueScraper()
url_22076 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22076'
issue_22076 = scraper.scrape(url_22076) # bug report
successful = attach_oss_fuzz_bug_report(issue_22076)
self.assertTrue(successful)
self.assertIsNotNone(issue_22076.oss_fuzz_bug_report)
def test_is_oss_fuzz_bug_report(self):
scraper = IssueScraper()
url_22076 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22076'
issue_22076 = scraper.scrape(url_22076) # bug report
url_25371 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=25371'
issue_25371 = scraper.scrape(url_25371) # build failure
url_projzero = 'https://bugs.chromium.org/p/project-zero/issues/detail?id=9&q=&can=1'
issue_projzero = scraper.scrape(url_projzero)
self.assertTrue(is_oss_fuzz_bug_report(issue_22076))
self.assertFalse(is_oss_fuzz_bug_report(issue_25371))
self.assertFalse(is_oss_fuzz_bug_report(issue_projzero))
def test_parse_oss_fuzz_issue_details(self):
scraper = IssueScraper()
url_22076 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22076'
issue_22076 = scraper.scrape(url_22076)
oss_fuzz_issue_details_22076 = parse_oss_fuzz_bug_report_details(issue_22076)
self.assertEqual(oss_fuzz_issue_details_22076.project, 'llvm')
self.assertEqual(oss_fuzz_issue_details_22076.fuzzing_engine, 'libFuzzer')
self.assertEqual(oss_fuzz_issue_details_22076.fuzz_target_binary, 'clang-fuzzer')
self.assertEqual(oss_fuzz_issue_details_22076.job_type, 'libfuzzer_asan_llvm')
self.assertEqual(oss_fuzz_issue_details_22076.platform_id, 'linux')
self.assertEqual(oss_fuzz_issue_details_22076.crash_type, 'Stack-overflow')
self.assertEqual(oss_fuzz_issue_details_22076.crash_addr, '0x7ffeb72c0f48')
self.assertEqual(oss_fuzz_issue_details_22076.crash_state, ('GetFullTypeForDeclarator',
'clang::Sema::GetTypeForDeclarator',
'clang::Sema::ActOnBlockArguments'))
self.assertEqual(oss_fuzz_issue_details_22076.sanitizer, 'address (ASAN)')
self.assertEqual(oss_fuzz_issue_details_22076.regressed_commits_url,
'https://oss-fuzz.com/revisions?job=libfuzzer_asan_llvm&range=202005030248:202005040645')
self.assertEqual(oss_fuzz_issue_details_22076.fixed_commits_url,
'https://oss-fuzz.com/revisions?job=libfuzzer_asan_llvm&range=202005070415:202005080243')
self.assertEqual(oss_fuzz_issue_details_22076.testcase_url,
'https://oss-fuzz.com/download?testcase_id=5196721950031872')
def test_get_proj(self):
proj_5 = _get_project(test_input_5, 5)
self.assertEqual(proj_5, 'libarchive')
proj_22076 = _get_project(test_input_22076, 22076)
self.assertEqual(proj_22076, 'llvm')
def test_get_fuzzing_engine(self):
fzeng_5 = _get_fuzzing_engine(test_input_5, 5)
self.assertEqual(fzeng_5, 'libFuzzer')
fzeng_6531 = _get_fuzzing_engine(test_input_6531, 6531)
self.assertEqual(fzeng_6531, 'js_fuzzer')
fzeng_16307 = _get_fuzzing_engine(test_input_16307, 16307)
self.assertEqual(fzeng_16307, 'afl')
fzeng_22076 = _get_fuzzing_engine(test_input_22076, 22076)
self.assertEqual(fzeng_22076, 'libFuzzer')
def test_get_fuzz_target_binary(self):
fztgt_5 = _get_fuzz_target_binary(test_input_5, 5)
self.assertEqual(fztgt_5, 'libarchive_fuzzer')
fztgt_6531 = _get_fuzz_target_binary(test_input_6531, 6531)
self.assertEqual(fztgt_6531, 'js_fuzzer')
fztgt_16307 = _get_fuzz_target_binary(test_input_16307, 16307)
self.assertEqual(fztgt_16307, 'compress_fuzzer')
fztgt_22076 = _get_fuzz_target_binary(test_input_22076, 22076)
self.assertEqual(fztgt_22076, 'clang-fuzzer')
def test_get_job_type(self):
jobtype = _get_job_type(test_input_22076)
assert jobtype == 'libfuzzer_asan_llvm'
def test_get_platform_id(self):
platform = _get_platform_id(test_input_22076)
assert platform == 'linux'
def test_get_crash_type(self):
crashtype = _get_crash_type(test_input_22076)
assert crashtype == 'Stack-overflow'
def test_get_crash_address(self):
addr_16307 = _get_crash_address(test_input_16307)
assert addr_16307 == ''
addr_22076 = _get_crash_address(test_input_22076)
assert addr_22076 == '0x7ffeb72c0f48'
def test_get_crash_state(self):
crashstate_19429 = _get_crash_state(test_input_19429)
self.assertEqual(crashstate_19429, ('NULL',))
crashstate_22076 = _get_crash_state(test_input_22076)
self.assertEqual(crashstate_22076,
('GetFullTypeForDeclarator', 'clang::Sema::GetTypeForDeclarator', 'clang::Sema::ActOnBlockArguments'))
def test_get_sanitizer(self):
sanitizer_5 = _get_sanitizer(test_input_5, 5)
self.assertEqual(sanitizer_5, 'address (ASAN)')
sanitizer_22076 = _get_sanitizer(test_input_22076, 22076)
self.assertEqual(sanitizer_22076, 'address (ASAN)')
def test_get_regressed_commits_url(self):
regressed_5 = _get_regressed_commits_url(test_input_5)
self.assertIsNone(regressed_5)
regressed_22076 = _get_regressed_commits_url(test_input_22076)
self.assertEqual(regressed_22076, 'https://oss-fuzz.com/revisions?job=libfuzzer_asan_llvm&range=202005030248:202005040645')
regressed_24163 = _get_regressed_commits_url(test_input_24163)
self.assertEqual(regressed_24163, 'https://oss-fuzz.com/revisions?job=libfuzzer_asan_i386_libevt&revision=202007150438')
def test_get_fixed_commits_url(self):
scraper = IssueScraper()
url_5 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=5'
issue_5 = scraper.scrape(url_5)
comments_5 = issue_5.comments
fixed_5 = _get_fixed_commits_url(comments_5, 5)
assert fixed_5 == 'https://clusterfuzz-external.appspot.com/revisions?job=libfuzzer_asan_libarchive&range=201605271439:201605271739'
url_22076 = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=22076'
issue_22076 = scraper.scrape(url_22076)
comments_22076 = issue_22076.comments
fixed_22076 = _get_fixed_commits_url(comments_22076, 22076)
assert fixed_22076 == 'https://oss-fuzz.com/revisions?job=libfuzzer_asan_llvm&range=202005070415:202005080243'
def test_get_testcase_url(self):
tst_5 = _get_testcase_url(test_input_5)
assert tst_5 == 'https://clusterfuzz-external.appspot.com/download/AMIfv97UJ_XegpDWBsRbTqTw-2GXGnM9sFyyhbLgIpxY2I5jzNAiwJF8mF_cBinyVep976oB_sAB_UFxDc_pVduWNXhlHryizcDM7MctFvyTv_IRwGzOvsCkBGkK2xF-83gFeQsuAPS9cVpjOVLxuz3my3T6pEG0D3XyduSUqv6VnLTAKGvtp7E'
tst_126 = _get_testcase_url(test_input_126)
assert tst_126 == 'https://clusterfuzz-external.appspot.com/download/AMIfv94e2eucet3LDQplzG1u73sGldGgS5OJyDfv2uramuXF209jN8Ouy--5rjrrjmsStzerBsPvdYMW0Q4-HM-qvseSDZl1DEqVtGx8Ajwsuvt5Zcql9E42Jt_CACwxxvp0CTz4JeuLyfsdxJPcSop-TKtSb_PNT_X-ONwVEtErCSRsXlAdBg4?testcase_id=6544078783119360'
tst_22076 = _get_testcase_url(test_input_22076)
assert tst_22076 == 'https://oss-fuzz.com/download?testcase_id=5196721950031872'
| 54.099338
| 290
| 0.735953
|
bd8baf9155c0efee3a26ad7a7cf05af206000ca6
| 384
|
py
|
Python
|
stubs/ussl.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
stubs/ussl.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
stubs/ussl.py
|
esbullington/cct
|
1a89d346c76236a9710177a208730584ecb65c02
|
[
"MIT"
] | null | null | null |
"""
Module: 'ussl' on micropython-esp32-1.13-274
"""
# MCU: {'ver': '1.13-274', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.13.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.13.0', 'machine': 'ESP32 module with ESP32', 'build': '274', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.3.9
def wrap_socket():
pass
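# Usage sketch on real firmware (this file is only a stub of the API): the actual
# module wraps an existing socket in TLS, roughly
#   import usocket, ussl
#   ss = ussl.wrap_socket(usocket.socket())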
| 42.666667
| 284
| 0.601563
|
884c1ef3cc4f0473ed6a7f09e1911806f90f7041
| 508
|
py
|
Python
|
Python/p001-Basic-Neuron-3-inputs.py
|
Abhash600/Neural-Networks-with-different-languages
|
448e0798826543d0bad9128d6ef683ba4fff3880
|
[
"MIT"
] | null | null | null |
Python/p001-Basic-Neuron-3-inputs.py
|
Abhash600/Neural-Networks-with-different-languages
|
448e0798826543d0bad9128d6ef683ba4fff3880
|
[
"MIT"
] | null | null | null |
Python/p001-Basic-Neuron-3-inputs.py
|
Abhash600/Neural-Networks-with-different-languages
|
448e0798826543d0bad9128d6ef683ba4fff3880
|
[
"MIT"
] | 1
|
2021-07-30T21:45:21.000Z
|
2021-07-30T21:45:21.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 23:27:29 2020
@author: pranjal27bhardwaj
"""
import numpy as np
#print(inputs)
inputs_1 = [1.2, 5.1, 2.1]
weights = [3.1, 2.1, 8.7]
bias = 3.0
output_1 = inputs_1[0]*weights[0] + inputs_1[1]*weights[1] + inputs_1[2]*weights[2] + bias
print(output_1)
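# Equivalent vectorised form of the weighted sum above (illustrative only), using
# the numpy import already present:
#   output_1 = np.dot(inputs_1, weights) + bias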
'''
inputs_2 = np.random.rand(3, 1)
weights_2 = np.random.rand(3,1)
output_2 = inputs_2[0]*weights[0] + inputs_2[1]*weights[1] + inputs_2[2]*weights[2] + bias
print(output_2)'''
| 19.538462
| 90
| 0.653543
|
02042f421d2048bf94a783f39331aff9156e843b
| 2,768
|
py
|
Python
|
day22/day22-1.py
|
RobinRH/advent-of-code-2017
|
8996691abf4d9020294e5b750bef1e35effd5c68
|
[
"MIT"
] | null | null | null |
day22/day22-1.py
|
RobinRH/advent-of-code-2017
|
8996691abf4d9020294e5b750bef1e35effd5c68
|
[
"MIT"
] | null | null | null |
day22/day22-1.py
|
RobinRH/advent-of-code-2017
|
8996691abf4d9020294e5b750bef1e35effd5c68
|
[
"MIT"
] | null | null | null |
#answer -> 5256
'''
If the current node is infected, it turns to its right.
Otherwise, it turns to its left. (Turning is done in-place; the current node does not change.)
If the current node is clean, it becomes infected.
Otherwise, it becomes cleaned. (This is done after the node is considered for the purposes of changing direction.)
The virus carrier moves forward one node in the direction it is facing.
'''
import sys
import csv
import numpy as np
from pprint import pprint
def printInfections(infections, (xcarrier, ycarrier)):
map = np.zeros((10,10))
for (row, col) in infections:
map[row, col] = 1
map[xcarrier, ycarrier] = 9
print map
filename = sys.argv[1]
offset = 1000
row = 0
infections = []
with open(filename, 'r') as f:
for line in f:
line = line.replace('\n', '')
lineList = list(line)
for col in range(len(line)):
if lineList[col] == '#':
infections.append((row + offset, col + offset))
row += 1
#pprint(infections)
carrier = (25/2 + offset, 25/2 + offset)
print "carrier: ", carrier
left = 1
right = 2
up = 3
down = 4
#printInfections(infections, carrier)
print
direction = up
total = 0
for i in range(10000):
'''
If the current node is infected, it turns to its right.
Otherwise, it turns to its left. (Turning is done in-place; the current node does not change.)
If the current node is clean, it becomes infected.
Otherwise, it becomes cleaned. (This is done after the node is considered for the purposes of changing direction.)
The virus carrier moves forward one node in the direction it is facing.
'''
#print i+ 1, total
if carrier in infections:
# turn right
if direction == up:
direction = right
elif direction == down:
direction = left
elif direction == left:
direction = up
else:
direction = down
infections.remove(carrier)
#print "removing"
else:
# turn left
if direction == up:
direction = left
elif direction == down:
direction = right
elif direction == left:
direction = down
else:
direction = up
infections.append(carrier)
#print "adding"
total += 1
# move forward one
(row, col) = carrier
if direction == up:
carrier = (row - 1, col)
elif direction == down:
carrier = (row + 1, col)
elif direction == left:
carrier = (row, col - 1)
else: #right
carrier = (row, col + 1)
#print infections
#print "carrier: ", carrier
print "i, total: ", i, total
#printInfections(infections, carrier)
#print
| 25.394495
| 118
| 0.60513
|
28b9a6b6f496e652d6203a07d29a468dd8fb1632
| 1,961
|
py
|
Python
|
main.py
|
NawrasseDahman/Daily-Prayer-Time-API
|
fa1a1c47c04081a7f23f3b6fb978bc317af8ddae
|
[
"MIT"
] | 2
|
2021-09-16T20:04:47.000Z
|
2021-10-09T12:10:48.000Z
|
main.py
|
NawrasseDahman/Daily-Prayer-Time-API
|
fa1a1c47c04081a7f23f3b6fb978bc317af8ddae
|
[
"MIT"
] | null | null | null |
main.py
|
NawrasseDahman/Daily-Prayer-Time-API
|
fa1a1c47c04081a7f23f3b6fb978bc317af8ddae
|
[
"MIT"
] | null | null | null |
import requests
from flask import Flask, jsonify
from threading import Thread
import json
from os import system
import googlesearch
from bs4 import BeautifulSoup
import dateparser
app = Flask('')
app.config['JSON_SORT_KEYS'] = False
@app.route('/')
def home():
return "I'm alive"
@app.route('/api/<string:s>', methods=['GET'])
def prayer(s):
query = str(s + " prayer time site:muslimpro.com")
data = {}
mencari = googlesearch.search(query)
try :
url = str(mencari[0])
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
city = soup.find("h2", attrs ={"class": "place"})
dates = soup.find("h2", attrs ={"class": "date"})
month = soup.find("span", attrs ={"class": "display-month"})
data["city"] = city.get_text()
tanggal = dates.get_text().split()
data["date"] = tanggal[0] + " " + month.get_text()
data["today"] = {}
data["tomorrow"] = {}
waktu = soup.find_all("span", attrs ={"class": "waktu-solat"})
jam = soup.find_all("span", attrs ={"class": "jam-solat"})
for x,y in zip(waktu,jam):
data["today"][x.get_text()] = y.get_text()
names = ["Date","Fajr", "Sunrise", "Dhuhr", "Asr", "Maghrib", "Isha'a"]
try:
tomorrow = soup.find("tr", attrs={"class": "active"}).find_next("tr").find_all("td", attrs={"class": "prayertime"})
for x,y in zip(names,tomorrow):
data["tomorrow"][x] = y.get_text()
except :
month = str(dateparser.parse(data["date"]))[5:7]
url = url + '?date=2021-' + str(int(month)+1)
response = requests.get(url)
soup = BeautifulSoup(response.content, "html.parser")
tomorrow = soup.find_all("tr")[1].find_all("td", attrs={"class": "prayertime"})
for x,y in zip(names,tomorrow):
data["tomorrow"][x] = y.get_text()
except :
data["Error"] = "Result Not Found"
return jsonify(data)
def run():
app.run(host='0.0.0.0',port=7000)
t = Thread(target=run)
t.start()
| 33.237288
| 121
| 0.615502
|
5c5b05bf9090a5ccd01142a7c81f682151c46009
| 566
|
py
|
Python
|
setup.py
|
pspiagicw/bluemary
|
da2e1b25138bc311c3309cd142b5fd303f5ad6e0
|
[
"MIT"
] | null | null | null |
setup.py
|
pspiagicw/bluemary
|
da2e1b25138bc311c3309cd142b5fd303f5ad6e0
|
[
"MIT"
] | 8
|
2021-02-15T05:55:25.000Z
|
2021-03-02T07:29:29.000Z
|
setup.py
|
pspiagicw/bluemary
|
da2e1b25138bc311c3309cd142b5fd303f5ad6e0
|
[
"MIT"
] | null | null | null |
import setuptools
setuptools.setup(
name='BlueMary',
version='0.0.1dev',
author='pspiagicw',
author_email='pspiagicw@gmail.com',
license='MIT License',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/pspiagicw/bluemary',
packages=setuptools.find_packages(),
install_requires=[
'npyscreen',
],
entry_points={
'console_scripts':[
'bluemary=bluemary.main:run',
],
},
)
| 22.64
| 50
| 0.583039
|
9177b20583e5991a998881ee67f9e0260f476e2a
| 1,121
|
py
|
Python
|
Gathered CTF writeups/ptr-yudai-writeups/2019/watevrCTF_2019/sabataD/solve.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:00:41.000Z
|
2022-03-27T06:00:41.000Z
|
Gathered CTF writeups/ptr-yudai-writeups/2019/watevrCTF_2019/sabataD/solve.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | null | null | null |
Gathered CTF writeups/ptr-yudai-writeups/2019/watevrCTF_2019/sabataD/solve.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:01:42.000Z
|
2022-03-27T06:01:42.000Z
|
from ptrlib import *
def rot13(s):
output = b''
for c in s:
if ord('a') <= c <= ord('z'):
output += bytes([ord('a') + (c - ord('a') + 13) % 26])
elif ord('A') <= c <= ord('Z'):
output += bytes([ord('A') + (c - ord('A') + 13) % 26])
else:
output += bytes([c])
return output
def craft_payload(command, username, filepath):
payload = b''
for i in range(max(len(command), len(username), len(filepath))):
if len(command) > i:
payload += bytes([command[i]])
else:
payload += b'_'
if len(username) > i:
payload += bytes([username[i]])
else:
payload += b'_'
if len(filepath) > i:
payload += bytes([filepath[i]])
else:
payload += b'_'
payload += b'\x00' * (0xc8 - len(payload))
return rot13(payload)
sock = Socket("13.48.192.7", 50000)
payload = craft_payload(b'Fetch from file with index',
b'watevr-admin',
b'/home/ctf/flag.tx*')
sock.send(payload)
sock.interactive()
| 28.025
| 68
| 0.480821
|
33b60be318ddca8cf4cfb8cf0747c0cc9dba3234
| 1,938
|
py
|
Python
|
lib_crypto.py
|
Xeratec/pytrojan
|
42e1eb928d69ac514d8cbb0b0784bd8f0bf8f4a5
|
[
"MIT"
] | 1
|
2020-05-08T08:59:35.000Z
|
2020-05-08T08:59:35.000Z
|
lib_crypto.py
|
Xeratec/pytrojan
|
42e1eb928d69ac514d8cbb0b0784bd8f0bf8f4a5
|
[
"MIT"
] | null | null | null |
lib_crypto.py
|
Xeratec/pytrojan
|
42e1eb928d69ac514d8cbb0b0784bd8f0bf8f4a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import zlib
import base64
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
import Crypto.Hash.SHA
def encrypt_string(plaintext, key, VERBOSE=False):
## Returns base64 encrypted plaintext
chunk_size = 256-2-2*Crypto.Hash.SHA.digest_size
if VERBOSE: print '\tCompressing: %d bytes' % len(plaintext)
plaintext = zlib.compress(plaintext)
if VERBOSE: print "\tEncrypting %d bytes" % len(plaintext)
rsakey = RSA.importKey(key)
rsakey = PKCS1_OAEP.new(rsakey)
encrypted = ""
    offset = 0
    added_chunk = 0  # bytes of padding added to the final chunk (0 if none needed)
while offset < len(plaintext):
chunk = plaintext[offset:offset+chunk_size]
if len(chunk) % chunk_size != 0:
added_chunk = chunk_size - len(chunk)
chunk += " " * added_chunk
encrypted += rsakey.encrypt(chunk)
offset += chunk_size
if added_chunk < 0x10:
encrypted = "0x0" + str(hex(added_chunk))[2:] + encrypted
else:
encrypted = str(hex(added_chunk))+ encrypted
if VERBOSE: print "\tEncrypted: %d bytes" % len(encrypted)
encrypted = encrypted.encode("base64")
return encrypted
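# Round-trip usage sketch (the key variables are assumptions, not defined here):
# with pub_key/priv_key holding a matching PEM-encoded 2048-bit RSA key pair,
#   blob = encrypt_string("secret report", pub_key)
#   text = decrypt(blob, priv_key)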
def decrypt(plaintext, key, VERBOSE=False):
rsakey = RSA.importKey(key)
rsakey = PKCS1_OAEP.new(rsakey)
chunk_size = 256
offset = 0
decrypted = ""
encrypted = base64.b64decode(plaintext)
added_chunk = int(encrypted[:4],base=0)
encrypted = encrypted[4:]
if VERBOSE: print "\tDecrypt: %d bytes " % len(encrypted)
while offset < len(encrypted):
decrypted += rsakey.decrypt(encrypted[offset:offset+chunk_size])
offset += chunk_size
decrypted = decrypted[:(len(decrypted)-added_chunk)]
if VERBOSE: print "\tDecompress: %d bytes " % len(decrypted)
decrypted = zlib.decompress(decrypted)
if VERBOSE: print "\tDecompressed: %d bytes " % len(decrypted)
return decrypted
| 32.3
| 72
| 0.645511
|
fc72f96fd6c9b9b4a0e72d4c94728eb72e2e8a11
| 30
|
py
|
Python
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_foobar.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | 8
|
2018-11-03T12:50:42.000Z
|
2021-12-15T15:36:14.000Z
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_foobar.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | 1
|
2021-09-03T16:55:39.000Z
|
2021-09-03T16:55:39.000Z
|
{{ cookiecutter.repo_service_reponame }}/tests/{{ cookiecutter.slug }}/test_foobar.py
|
boilpy/boilpy
|
78f353a4b45e1df2e573993d686418dfc1f13cc8
|
[
"MIT"
] | null | null | null |
import {{ cookiecutter.slug }}
| 30
| 30
| 0.733333
|
a6a0ea66e31bff194953fcc05d99f818eabb954d
| 3,298
|
py
|
Python
|
pl_examples/basic_examples/autoencoder.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | 1
|
2021-08-05T01:45:26.000Z
|
2021-08-05T01:45:26.000Z
|
pl_examples/basic_examples/autoencoder.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | null | null | null |
pl_examples/basic_examples/autoencoder.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | 1
|
2021-02-16T00:47:46.000Z
|
2021-02-16T00:47:46.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader, random_split
import pytorch_lightning as pl
from pl_examples import _TORCHVISION_AVAILABLE, cli_lightning_logo
if _TORCHVISION_AVAILABLE:
from torchvision import transforms
from torchvision.datasets.mnist import MNIST
else:
from tests.helpers.datasets import MNIST
class LitAutoEncoder(pl.LightningModule):
"""
>>> LitAutoEncoder() # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
LitAutoEncoder(
(encoder): ...
(decoder): ...
)
"""
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(28 * 28, 64),
nn.ReLU(),
nn.Linear(64, 3),
)
self.decoder = nn.Sequential(
nn.Linear(3, 64),
nn.ReLU(),
nn.Linear(64, 28 * 28),
)
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
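        # Illustrative call (shapes follow from the encoder above): for a batch of
        # flattened 28*28 MNIST images x of shape [N, 784], model(x) returns an
        # [N, 3] embedding tensor.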
def training_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
def cli_main():
pl.seed_everything(1234)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--hidden_dim', type=int, default=128)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# ------------
# data
# ------------
dataset = MNIST('', train=True, download=True, transform=transforms.ToTensor())
mnist_test = MNIST('', train=False, download=True, transform=transforms.ToTensor())
mnist_train, mnist_val = random_split(dataset, [55000, 5000])
train_loader = DataLoader(mnist_train, batch_size=args.batch_size)
val_loader = DataLoader(mnist_val, batch_size=args.batch_size)
test_loader = DataLoader(mnist_test, batch_size=args.batch_size)
# ------------
# model
# ------------
model = LitAutoEncoder()
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(args)
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
result = trainer.test(test_dataloaders=test_loader)
print(result)
if __name__ == '__main__':
cli_lightning_logo()
cli_main()
| 28.431034
| 87
| 0.639478
|
7887ab41e0402230d4eb0b05bab70d7a333d17d8
| 9,493
|
py
|
Python
|
src/pyronn_torch/conebeam.py
|
mareikethies/pyronn-torch
|
15f5e6dc4cfa9413d9bb46539806ee32e704161a
|
[
"MIT"
] | 8
|
2020-05-17T09:40:40.000Z
|
2022-01-07T09:38:07.000Z
|
src/pyronn_torch/conebeam.py
|
mareikethies/pyronn-torch
|
15f5e6dc4cfa9413d9bb46539806ee32e704161a
|
[
"MIT"
] | 5
|
2020-08-19T23:22:37.000Z
|
2021-07-09T14:11:57.000Z
|
src/pyronn_torch/conebeam.py
|
theHamsta/pyronn-torch
|
9641005b2ec78c8ede420a489ce5efaaddf9b12e
|
[
"MIT"
] | 5
|
2020-05-26T23:27:58.000Z
|
2021-07-09T07:36:12.000Z
|
#
# Copyright © 2020 Stephan Seitz <stephan.seitz@fau.de>
#
# Distributed under terms of the GPLv3 license.
"""
"""
import numpy as np
import scipy.linalg
import torch
import pyronn_torch
class State:
def __init__(self,
projection_shape,
volume_shape,
source_points,
inverse_matrices,
projection_matrices,
volume_origin,
volume_spacing,
projection_multiplier,
step_size=1.,
with_texture=True):
self.projection_shape = projection_shape
self.volume_shape = volume_shape
self.source_points = source_points
self.inverse_matrices = inverse_matrices
self.projection_matrices = projection_matrices
self.volume_origin = volume_origin
self.volume_spacing = volume_spacing
self.projection_multiplier = projection_multiplier
self.with_texture = with_texture
self.step_size = step_size
class _ForwardProjection(torch.autograd.Function):
@staticmethod
def forward(self, volume, state=None):
if state is None:
state = self.state
return_none = True
else:
return_none = False
volume = volume.float().cuda().contiguous()
projection = torch.zeros(state.projection_shape,
device='cuda',
requires_grad=volume.requires_grad).float().contiguous()
assert pyronn_torch.cpp_extension
if state.with_texture:
pyronn_torch.cpp_extension.call_Cone_Projection_Kernel_Tex_Interp_Launcher(
inv_matrices=state.inverse_matrices,
projection=projection,
source_points=state.source_points,
step_size=state.step_size,
volume=volume,
volume_spacing_x=state.volume_spacing[0],
volume_spacing_y=state.volume_spacing[1],
volume_spacing_z=state.volume_spacing[2])
else:
pyronn_torch.cpp_extension.call_Cone_Projection_Kernel_Launcher(
inv_matrices=state.inverse_matrices,
projection=projection,
source_points=state.source_points,
step_size=state.step_size,
volume=volume,
volume_spacing_x=state.volume_spacing[0],
volume_spacing_y=state.volume_spacing[1],
volume_spacing_z=state.volume_spacing[2])
self.state = state
if return_none:
return projection, None
else:
return projection,
@staticmethod
def backward(self, projection_grad, state=None, *args):
if state is None:
state = self.state
return_none = True
else:
return_none = False
projection_grad = projection_grad.float().cuda().contiguous()
volume_grad = torch.zeros(state.volume_shape,
device='cuda',
requires_grad=projection_grad.requires_grad)
assert pyronn_torch.cpp_extension
pyronn_torch.cpp_extension.call_Cone_Backprojection3D_Kernel_Launcher(
state.projection_matrices, projection_grad,
state.projection_multiplier, volume_grad, *state.volume_origin,
*state.volume_spacing)
self.state = state
if return_none:
return volume_grad, None
else:
return volume_grad,
class _BackwardProjection(torch.autograd.Function):
backward = staticmethod(_ForwardProjection.forward)
forward = staticmethod(_ForwardProjection.backward)
class ConeBeamProjector:
def __init__(self,
volume_shape,
volume_spacing,
volume_origin,
projection_shape,
projection_spacing,
projection_origin,
projection_matrices,
source_isocenter_distance=1,
source_detector_distance=1):
self._volume_shape = volume_shape
self._volume_origin = volume_origin
self._volume_spacing = volume_spacing
self._projection_shape = projection_shape
self._projection_matrices_numpy = projection_matrices
self._projection_spacing = projection_spacing
self._projection_origin = projection_origin
self._source_isocenter_distance = source_isocenter_distance
self._source_detector_distance = source_detector_distance
self._calc_inverse_matrices()
@classmethod
def from_conrad_config(cls):
import pyconrad.autoinit
import pyconrad.config
volume_shape = pyconrad.config.get_reco_shape()
volume_spacing = pyconrad.config.get_reco_spacing()
volume_origin = pyconrad.config.get_reco_origin()
projection_shape = pyconrad.config.get_sino_shape()
projection_spacing = [
pyconrad.config.get_geometry().getPixelDimensionX(),
pyconrad.config.get_geometry().getPixelDimensionY(),
]
projection_origin = [
pyconrad.config.get_geometry().getDetectorOffsetU(),
pyconrad.config.get_geometry().getDetectorOffsetV(),
]
projection_matrices = pyconrad.config.get_projection_matrices()
obj = cls(volume_shape=volume_shape,
volume_spacing=volume_spacing,
volume_origin=volume_origin,
projection_shape=projection_shape,
projection_spacing=projection_spacing,
projection_origin=projection_origin,
projection_matrices=projection_matrices)
return obj
def new_volume_tensor(self, requires_grad=False):
return torch.zeros(self._volume_shape,
requires_grad=requires_grad,
device='cuda')
def new_projection_tensor(self, requires_grad=False):
return torch.zeros(self._projection_shape,
requires_grad=requires_grad,
device='cuda')
def project_forward(self, volume, step_size=1., use_texture=True):
return _ForwardProjection.apply(
volume,
State(projection_shape=self._projection_shape,
volume_shape=self._volume_shape,
source_points=self._source_points,
inverse_matrices=self._inverse_matrices,
projection_matrices=self._projection_matrices,
volume_origin=self._volume_origin,
volume_spacing=self._volume_spacing,
projection_multiplier=self._projection_multiplier,
step_size=step_size,
with_texture=use_texture))[0]
def project_backward(self,
projection_stack,
step_size=1.,
use_texture=True):
return _BackwardProjection.apply(
projection_stack,
State(projection_shape=self._projection_shape,
volume_shape=self._volume_shape,
source_points=self._source_points,
inverse_matrices=self._inverse_matrices,
projection_matrices=self._projection_matrices,
volume_origin=self._volume_origin,
volume_spacing=self._volume_spacing,
projection_multiplier=self._projection_multiplier,
step_size=step_size,
with_texture=use_texture))[0]
def _calc_inverse_matrices(self):
if self._projection_matrices_numpy is None:
return
self._projection_matrices = torch.stack(
tuple(
torch.from_numpy(p.astype(np.float32))
for p in self._projection_matrices_numpy)).cuda().contiguous()
inv_spacing = np.array([1 / s for s in self._volume_spacing],
np.float32)
camera_centers = list(map(
lambda x: np.array(np.expand_dims(scipy.linalg.null_space(x), 0), np.float32),
self._projection_matrices_numpy))
source_points = list(map(
lambda x: (x[0, :3, 0] / x[0, 3, 0] * inv_spacing
- np.array(list(self._volume_origin)) * inv_spacing).astype(np.float32), camera_centers))
scaling_matrix = np.array([[inv_spacing[0], 0, 0], [0, inv_spacing[1], 0], [0, 0, inv_spacing[2]]])
inv_matrices = list(map(
lambda x:
(scaling_matrix @ np.linalg.inv(x[:3, :3])).astype(np.float32),
self._projection_matrices_numpy))
self._inverse_matrices = torch.stack(
tuple(map(torch.from_numpy, inv_matrices))).float().cuda().contiguous()
self._source_points = torch.stack(
tuple(map(torch.from_numpy, source_points))).float().cuda().contiguous()
self._projection_multiplier = self._source_isocenter_distance * self._source_detector_distance * \
self._projection_spacing[-1] * np.pi / self._projection_shape[0]
@property
def projection_matrices(self):
return self._projection_matrices_numpy
@projection_matrices.setter
def projection_matrices(self, numpy_matrices):
self._projection_matrices_numpy = numpy_matrices
self._calc_inverse_matrices()
| 39.065844
| 112
| 0.619088
|
35655708f6f0faa014073c158abb348d82cea232
| 5,585
|
py
|
Python
|
dataprep/clean/clean_my_nric.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 1,229
|
2019-12-21T02:58:59.000Z
|
2022-03-30T08:12:33.000Z
|
dataprep/clean/clean_my_nric.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 680
|
2019-12-19T06:09:23.000Z
|
2022-03-31T04:15:25.000Z
|
dataprep/clean/clean_my_nric.py
|
Waterpine/dataprep-1
|
4032acb1d1f2c413d4cb000d17e8ffa611315f9f
|
[
"MIT"
] | 170
|
2020-01-08T03:27:26.000Z
|
2022-03-20T20:42:55.000Z
|
"""
Clean and validate a DataFrame column containing
Malaysian National Registration Identity Card Numbers (NRICs).
"""
# pylint: disable=too-many-lines, too-many-arguments, too-many-branches
from typing import Any, Union
from operator import itemgetter
import dask.dataframe as dd
import numpy as np
import pandas as pd
from stdnum.my import nric
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, to_dask
def clean_my_nric(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
output_format: str = "standard",
inplace: bool = False,
errors: str = "coerce",
progress: bool = True,
) -> pd.DataFrame:
"""
Clean Malaysian National Registration Identity Card Numbers (NRICs) in a DataFrame column.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
col
The name of the column containing data of NRIC type.
output_format
The output format of standardized number string.
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
If output_format = 'birthdate', return the registration date or the birth date.
If output_format = 'birthplace', return a dict containing the birthplace of the person.
(default: "standard")
inplace
If True, delete the column containing the data that was cleaned.
Otherwise, keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
progress
If True, display a progress bar.
(default: True)
Examples
--------
Clean a column of NRIC data.
>>> df = pd.DataFrame({
"nric": [
"770305021234",
"771305-02-1234",]
})
>>> clean_my_nric(df, 'nric')
nric nric_clean
0 770305021234 770305-02-1234
1 771305-02-1234 NaN
"""
if output_format not in {"compact", "standard", "birthdate", "birthplace"}:
raise ValueError(
f"output_format {output_format} is invalid. "
'It needs to be "compact", "standard", "birthdate" or "birthplace".'
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [_format(x, output_format, errors) for x in srs],
meta=object,
)
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
df = df.drop(columns=["clean_code_tup"])
if inplace:
df[column] = df[f"{column}_clean"]
df = df.drop(columns=f"{column}_clean")
df = df.rename(columns={column: f"{column}_clean"})
with ProgressBar(minimum=1, disable=not progress):
df = df.compute()
return df
def validate_my_nric(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is NRIC in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
col
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(nric.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(nric.is_valid)
else:
return df.applymap(nric.is_valid)
return nric.is_valid(df)
def _format(val: Any, output_format: str = "standard", errors: str = "coerce") -> Any:
"""
Reformat a number string with proper separators and whitespace.
Parameters
----------
val
The value of number string.
output_format
If output_format = 'compact', return string without any separators or whitespace.
If output_format = 'standard', return string with proper separators and whitespace.
If output_format = 'birthdate', return the registration date or the birth date.
If output_format = 'birthplace', return a dict containing the birthplace of the person.
"""
val = str(val)
result: Any = []
if val in NULL_VALUES:
return [np.nan]
if not validate_my_nric(val):
if errors == "raise":
raise ValueError(f"Unable to parse value {val}")
error_result = val if errors == "ignore" else np.nan
return [error_result]
if output_format == "compact":
result = [nric.compact(val)] + result
elif output_format == "standard":
result = [nric.format(val)] + result
elif output_format == "birthdate":
result = [nric.get_birth_date(val)] + result
elif output_format == "birthplace":
result = [nric.get_birth_place(val)] + result
return result
| 32.47093
| 99
| 0.62077
|
0ea9a31d22f2a369e97c288f19d6f3de3373d9a8
| 3,665
|
py
|
Python
|
ext/v8/upstream/scons/engine/SCons/Tool/Perforce.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | 1
|
2015-11-05T01:29:05.000Z
|
2015-11-05T01:29:05.000Z
|
ext/v8/upstream/scons/engine/SCons/Tool/Perforce.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | null | null | null |
ext/v8/upstream/scons/engine/SCons/Tool/Perforce.py
|
bsingr/therubyracer
|
2397cae80aa8f458c028e28bdf2bd8a93e6161a6
|
[
"MIT",
"Unlicense"
] | null | null | null |
"""SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/Perforce.py 4629 2010/01/17 22:23:21 scons"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# This function should maybe be moved to SCons.Util?
from SCons.Tool.PharLapCommon import addPathIfNotExists
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SystemRoot' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
"""Add a Builder factory function and construction variables for
Perforce to an Environment."""
def PerforceFactory(env=env):
""" """
return SCons.Builder.Builder(action = PerforceAction, env = env)
#setattr(env, 'Perforce', PerforceFactory)
env.Perforce = PerforceFactory
env['P4'] = 'p4'
env['P4FLAGS'] = SCons.Util.CLVar('')
env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
try:
environ = env['ENV']
except KeyError:
environ = {}
env['ENV'] = environ
# Perforce seems to use the PWD environment variable rather than
# calling getcwd() for itself, which is odd. If no PWD variable
# is present, p4 WILL call getcwd, but this seems to cause problems
# with good ol' Windows's tilde-mangling for long file names.
environ['PWD'] = env.Dir('#').get_abspath()
for var in _import_env:
v = os.environ.get(var)
if v:
environ[var] = v
if SCons.Util.can_read_reg:
# If we can read the registry, add the path to Perforce to our environment.
try:
k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
'Software\\Perforce\\environment')
val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
addPathIfNotExists(environ, 'PATH', val)
except SCons.Util.RegError:
# Can't detect where Perforce is, hope the user has it set in the
# PATH.
pass
def exists(env):
return env.Detect('p4')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 34.904762
| 95
| 0.692497
|