content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import torch
def stft_wrapper(x, fft_n, frame_shift, frame_length, window,
                 pad_mode="constant"):
    """Version-agnostic wrapper around torch.stft.

    torch.stft changed signature over time: from torch 1.7 on it accepts
    (and later effectively requires) `return_complex`.

    input
    -----
      x: tensor, waveform, (batch, length)
      window: tensor, window coef, (frame_length, )
    output
    ------
      tensor, real/imag stacked STFT, (batch, bin_num, frame_num, 2)
    """
    # Compare (major, minor) so torch 2.x takes the modern branch too; the
    # original minor-only check mis-routed e.g. "2.1.0" (minor 1 < 7) into
    # the legacy call, which fails on torch >= 1.8.
    parts = torch.__version__.split('.')
    if parts[0].isnumeric() and parts[1].isnumeric() and \
       (int(parts[0]), int(parts[1])) < (1, 7):
        # torch <= 1.6.*: return_complex does not exist yet
        return torch.stft(x, fft_n, frame_shift, frame_length,
                          window=window, onesided=True, pad_mode=pad_mode)
    else:
        # torch >= 1.7: explicitly request the real-valued stacked output
        return torch.stft(x, fft_n, frame_shift, frame_length,
                          window=window, onesided=True, pad_mode=pad_mode,
                          return_complex=False)
import torch
def one_hot_encoding(input, c):
    """
    One-hot encoder: Converts NxHxW label image to NxCxHxW, where each label is stored in a separate channel
    :param input: input image (NxHxW), integer (long) class labels in [0, c)
    :param c: number of channels/labels
    :return: output image (NxCxHxW), float, one-hot along dim 1
    """
    assert input.dim() == 3
    N, H, W = input.size()
    # Allocate on the same device as the labels; the original always built a
    # CPU tensor, which made scatter_ fail for CUDA inputs.
    result = torch.zeros((N, c, H, W), device=input.device)
    # torch.Tensor.scatter_(dim, index, src) -> Tensor
    # For a 4d tensor with dim == 1:
    #   self[i][index[i][j][k][h]][k][h] = src[i][j][k][h]
    result.scatter_(1, input.unsqueeze(1), 1)
    return result
def kv_array_to_dict(kv_pairs):
    """
    Takes an iterable of key=value formatted strings and returns a dict.

    Entries with no '=' are silently skipped.  Values may themselves
    contain '=' characters (the original split on every '=' and silently
    dropped such pairs); only the first '=' separates key from value.
    """
    pairs = {}
    if kv_pairs:
        for kv_string in kv_pairs:
            key, sep, value = kv_string.partition("=")
            if sep:
                pairs[key] = value
    return pairs
import inspect
def call(method, **kwargs):
    """Call *method*, forwarding only the keyword arguments it accepts."""
    # Classes overriding __new__ (e.g. SelfManaged) hide the real parameter
    # list, so inspect __init__ for classes instead.
    target = method.__init__ if inspect.isclass(method) else method
    accepted = inspect.signature(target).parameters.keys()
    filtered = {name: value for name, value in kwargs.items() if name in accepted}
    return method(**filtered)
def read_database(file_name):
    """
    Read the method templates from a file.

    A template starts at a line containing the token ``def``; name and
    argument list are parsed from ``def name(args):``.  All following
    lines (until the next ``def``) form the method's body.

    :param file_name: path of the template file
    :return: dict mapping method name -> [arguments, body line, ...]
    """
    with open(file_name, 'r') as file_obj:
        lines = file_obj.readlines()
    methods = {}
    name = ''
    for line in lines:
        if 'def' in line.split():
            # found the start of a new method
            name = line.split()[1].split('(')[0]
            arguments = line.split()[1].split('(')[1].split(')')[0]
            methods[name] = [arguments]
            continue
        if not name:
            # Ignore lines before the first definition; the original
            # crashed here with KeyError('').
            continue
        # This line belongs to the current method's body.
        methods[name].append(line)
    return methods
def _derive_module_name(ctx):
"""Gets the `module_name` attribute if it's set in the ctx, otherwise derive a unique module name using the elements
found in the label."""
module_name = getattr(ctx.attr, "module_name", "")
if module_name == "":
module_name = (ctx.label.package.lstrip("/").replace("/", "_") + "-" + ctx.label.name.replace("/", "_"))
return module_name | 26eb19b2f24ccf22c0387f14aff4caafb3cc63f2 | 683,125 |
def inr(value):
    """Format *value* as an Indian Rupee amount, e.g. 1234.5 -> '₹1,234.50'."""
    formatted = format(value, ",.2f")
    return "₹" + formatted
import json
def fetch_json_from_path(path):
    """Load and return the JSON document stored at *path*.

    Args:
        path: filesystem path to a JSON file, or None
    Returns:
        dict: parsed JSON object, or None when no path was given
    """
    if path is None:
        return None
    with open(path, "r") as file:
        return json.load(file)
def back_num(num: int) -> bytes:
    """Return the ANSI escape sequence selecting 256-color background *num* (0-255)."""
    sequence = "\x1b[48;5;%dm" % num
    return sequence.encode("ascii")
def build_testset_surprise(dataset):
    """
    Construct a Surprise test set covering every rating in *dataset*,
    suitable for making predictions and evaluating a model.
    """
    return dataset.build_full_trainset().build_testset()
def add_alternative_source(transfer, alt_source):
    """
    Adds an alternative source to a transfer.

    Args:
        transfer: A dictionary created with new_transfer
        alt_source: Alternative source
    Returns:
        For convenience, transfer
    """
    # Bug fix: Python lists have no C++-style push_back(); use append().
    transfer['sources'].append(alt_source)
    return transfer
def ComputeQ(node_weights):
    """Return Q, the sum of all node weights of a graph."""
    total = node_weights.sum()
    return total
def get_isbn(raw):
    """
    Extract the first ISBN of a Libris edition, if any.

    @param raw: json object of a Libris edition
    @type raw: dictionary
    @return: the first ISBN value, or None when none is present
    """
    identified_by = raw["mainEntity"].get("identifiedBy")
    if identified_by:
        # Bug fix: the original indexed [0] unconditionally and raised
        # IndexError when identifiers existed but none of them were ISBNs.
        for entry in identified_by:
            if entry["@type"].lower() == "isbn":
                return entry["value"]
    return None
def has_any_labels(revision, labels):
    """
    Return True when at least one of *labels* occurs in *revision*.
    """
    return any(label in revision for label in labels)
def parse_parent(docname):
    """ Given a docname path, return the docname of its parent document. """
    parts = docname.split('/')
    depth = len(parts)
    if docname == 'index':
        # The top of the Sphinx project has no parent
        return None
    if depth == 1:
        # Non-index doc in root, e.g. "about"
        return 'index'
    if depth == 2:
        # "blog/index" hangs off the root; "blog/about" off "blog/index"
        return 'index' if parts[-1] == 'index' else parts[0] + '/index'
    if parts[-1] == 'index':
        # "blog/sub/index" -> parent is "blog/index"
        return '/'.join(parts[:-2]) + '/index'
    # "blog/sub/about" -> parent is "blog/sub/index"
    return '/'.join(parts[:-1]) + '/index'
def _get_config_option(parser, section='stere', option='', default=''):
"""Get an option from a config section, if it exists.
Arguments:
section(str): The name of the section
option(str): The name of the option
Returns:
str: The found value, or else the default value
"""
if parser.has_option(section, option):
return parser.get(section, option)
return default | 3c124eea50067318e320de32e5ce9fa85e6083af | 683,153 |
def string_to_bool(arg):
    """Convert the strings 'true'/'false' (any case) to a boolean."""
    lowered = arg.lower()
    if lowered not in ('true', 'false'):
        raise ValueError('ValueError: Argument must be either "true" or "false".')
    return lowered == 'true'
def all_subsets(aset):
    """Solution to exercise C-4.15.

    Recursively produce every subset of *aset* (a list of unique numbers),
    with no repeats.  Both the empty set and the full set are included
    among the subsets.
    """
    if not aset:
        return [[]]  # base case: only the empty set
    # Subsets of the tail, then the same subsets with the head prepended.
    tail_subsets = all_subsets(aset[1:])
    with_head = [[aset[0]] + subset for subset in tail_subsets]
    return tail_subsets + with_head
def is_space_free(board, move):
    """Return True when the given board cell holds a blank space."""
    cell = board[move]
    return cell == ' '
def check_module(nwbfile, name, description=None):
    """
    Return the processing module *name*, creating it first when absent.

    Parameters
    ----------
    nwbfile: pynwb.NWBFile
    name: str
    description: str | None (optional)
        Defaults to *name* when a new module must be created.
    Returns
    -------
    pynwb.module
    """
    try:
        return nwbfile.modules[name]
    except KeyError:
        effective_description = name if description is None else description
        return nwbfile.create_processing_module(name, effective_description)
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, (~number & (number-1)).bit_length()) | e174ecc79b27592323084bbda0ab993c9ba292ad | 683,162 |
def toposort(tasks):
    """Simple topological sort routine for task lists. Not fast, but easy
    - especially leaves nodes in original order if already topologically
    sorted. Dependencies that are not in the tasks list are ignored for
    the purpose of the sort.

    Raises ValueError when the dependency graph contains a cycle (the
    original looped forever in that case).
    """
    tasks_set = set(tasks)
    tasks_out = []
    tasks_out_set = set()
    to_do = tasks
    while len(to_do) > 0:
        new_to_do = []
        for t in to_do:
            # A task is ready once all of its in-list deps are emitted.
            if set(t.deps).intersection(tasks_set).issubset(tasks_out_set):
                tasks_out.append(t)
                tasks_out_set.add(t)
            else:
                new_to_do.append(t)
        if len(new_to_do) == len(to_do):
            # No task became ready this pass: dependency cycle.
            raise ValueError("toposort: cyclic dependencies among tasks")
        to_do = new_to_do
    return tasks_out
def quiz_question_change(statement):
    """
    Return the sync-agent URL for a quiz question update.

    :param statement: the xAPI statement (unused)
    :return: The url location.
    """
    url = '/assistants/api/question_sync_agent/'
    return url
import random
def get_uniform_mutation_function(minimum, maximum):
    """
    Build a mutation function that samples uniformly from the closed
    interval [minimum, maximum]; see :ref:`mutation-functions`.

    The current gene value passed to the returned function is ignored.

    :param minimum: the minimum allowed value
    :type minimum: int/float
    :param maximum: the maximum allowed value
    :type maximum: int/float
    :returns: a function mapping any value to a uniform sample
    """
    def mutate(value):
        return random.uniform(minimum, maximum)
    return mutate
def is_bit_set(a, offset):
    """ Return True when bit *offset* of *a* is 1, False otherwise. """
    mask = 1 << offset
    return (a & mask) != 0
def avoid_hazards(future_head, data):
    """
    Return True if the proposed future_head avoids the hazards, False if it
    means you will hit a hazard next turn.
    """
    return future_head not in data["hazards"]
def __io_addr_reg(op):
"""
Return the i/o port address 'A' and register Rr/Rd from a 2-byte IN or OUT opcode sequence
"""
AVR_IO_IN_ADDR_MASK = 0x060F # mask for 'A' address for opcode `IN Rd,A`. nb non-contiguous
addr_part = op & AVR_IO_IN_ADDR_MASK
addr = ((addr_part >> 5) & 0x0030) | (addr_part & 0x0F)
AVR_IO_IN_REG_MASK = 0x01F0
reg_part = op & AVR_IO_IN_REG_MASK
reg = (reg_part >> 4) & 0x01F
return (addr, reg) | b9e9ed0f2c426cc0f6a0fa37b30371228e81d445 | 683,184 |
import string
def remove_punctuation(text: str) -> str:
    """ Remove punctuation (ASCII punctuation plus '¿¡') from every word.

    :param text: The string being searched and replaced on
    :return: Text without the punctuation characters
    """
    table = str.maketrans('', '', string.punctuation + '¿¡')
    cleaned = (word.translate(table) for word in text.split())
    return ' '.join(cleaned)
import crypt
def generate_mosquitto_user_line(username, password):
    """Generates a line for a mosquitto user with a crypt hashed password
    :username: username to use
    :password: password that will be hashed (SHA512)
    :returns: a line as expected by mosquitto
    """
    # NOTE(review): the crypt module is deprecated since Python 3.11 and
    # removed in 3.13 (PEP 594); a passlib/hashlib replacement is needed
    # for newer interpreters.
    # mksalt(METHOD_SHA512) produces a "$6$..." salt, i.e. SHA512-crypt.
    password_hash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
    line = f"{username}:{password_hash}"
    return line
import torch
def make_cuda(model):
    """Move *model* to the GPU when CUDA is available; otherwise return it unchanged."""
    if not torch.cuda.is_available():
        return model
    return model.cuda()
def isPower2(num):
    """
    Return True when *num* is a positive power of two.
    """
    if num <= 0:
        return False
    # A power of two has exactly one set bit.
    return (num & (num - 1)) == 0
def is_indexable_but_not_string(obj):
    """Return True if ``obj`` is indexable but isn't a string."""
    indexable = hasattr(obj, "__getitem__")
    stringlike = hasattr(obj, "strip")
    return indexable and not stringlike
def _process_keys(left, right):
"""
Helper function to compose cycler keys
Parameters
----------
left, right : Cycler or None
The cyclers to be composed
Returns
-------
keys : set
The keys in the composition of the two cyclers
"""
l_key = left.keys if left is not None else set()
r_key = right.keys if right is not None else set()
if l_key & r_key:
raise ValueError("Can not compose overlapping cycles")
return l_key | r_key | 684f93fe9f18e0a91de3f1293f9a8a98ec71191f | 683,205 |
from typing import List
import ipaddress
def is_ip_allowed(ip: str, allowed_networks: List[str]) -> bool:
    """Return True if the ip is in the list of allowed networks.

    Any IP is allowed when the list is empty.  Malformed IPs are rejected.
    Network specs with host bits set (e.g. "10.0.0.1/24") are tolerated
    (the original raised ValueError to the caller), and a v4/v6 version
    mismatch simply does not match (the original raised TypeError).
    """
    if not allowed_networks:
        return True
    try:
        addr = ipaddress.ip_address(ip)
    except ValueError:
        # Invalid IP
        return False
    for allowed_network in allowed_networks:
        try:
            network = ipaddress.ip_network(allowed_network, strict=False)
            if addr in network:
                return True
        except (ValueError, TypeError):
            # Bad network spec, or address/network version mismatch: skip.
            continue
    return False
def calculate_boundaries(dist_args1, dist_args2, dist_type, shift):
    """
    Calculate minimum and maximum reward possible for certain distribution types.
    For normal distribution take 3 times standard deviation. These minimum and maximum
    are used to determine the bin sizes.
    :param dist_args1: parameter of the distribution
    :param dist_args2: parameter of the distribution
    :param dist_type: string indicating which distribution type
    :param shift: shift of the multi-modal distribution
    :return: Minimum and maximum reward; (None, None) for an unknown dist_type
    """
    if dist_type == 'uniform' or dist_type == 'uniform_wide':
        # args are (low, high) pairs; take the envelope of both uniforms
        low_1, high_1 = dist_args1[0], dist_args1[1]
        low_2, high_2 = dist_args2[0], dist_args2[1]
        min_r = min(low_1, low_2)
        max_r = max(high_1, high_2)
    elif dist_type == 'normal' or dist_type == 'normal2':
        # args are (mean, std) pairs; +-3 sigma covers ~99.7% of the mass
        mean_1, std_1 = dist_args1[0], dist_args1[1]
        mean_2, std_2 = dist_args2[0], dist_args2[1]
        min_r = min(mean_1 - 3 * std_1, mean_2 - 3 * std_2)
        max_r = max(mean_1 + 3 * std_1, mean_2 + 3 * std_2)
    elif dist_type == 'multi-modal':
        # NOTE(review): the 0.5 + 0.5 widening around dist_args2[0] looks
        # like a hard-coded mode spacing -- confirm against the sampler.
        min_r, max_r = dist_args2[0] - shift, dist_args2[0] + 0.5 + 0.5 + shift
    elif dist_type == 'categorical':
        # assumes dist_args*[0] are lists of category values, concatenated
        # by '+' -- TODO confirm with callers
        min_r = min(dist_args1[0] + dist_args2[0])
        max_r = max(dist_args1[0] + dist_args2[0])
    else:
        print('invalid dist type choose: uniform, normal, multi-modal, categorical')
        min_r = None
        max_r = None
    return min_r, max_r
def device(portnum):
    """Turn a zero-based port number into a Windows COM device name."""
    return 'COM{}'.format(portnum + 1)
def func(a):
    """Identity function: return `a` unchanged."""
    return a
def T0_T(M, gamma):
"""Ratio of total to static temperature for adiabatic flow (eq. 3.28)
:param <float> M: Mach # at area A
:param <float> gamma: Specific heat ratio
:return <float> Temperature ratio T0/T
"""
return 1.0 + 0.5 * (gamma - 1.0) * M ** 2 | 3a7b6877d07727282d8247a229a57b38f1460967 | 683,213 |
def flip_ctrlpts2d(ctrlpts2d, size_u=0, size_v=0):
    """ Flips a list of surface 2-D control points from *[u][v]* to *[v][u]* order.

    :param ctrlpts2d: 2-D control points
    :type ctrlpts2d: list, tuple
    :param size_u: size in U-direction (row length); auto-detected when <= 0
    :type size_u: int
    :param size_v: size in V-direction (column length); auto-detected when <= 0
    :type size_v: int
    :return: flipped 2-D control points (coordinates coerced to float)
    :rtype: list
    """
    if size_u <= 0 or size_v <= 0:
        # Detect array shape from the input itself
        size_u = len(ctrlpts2d)
        size_v = len(ctrlpts2d[0])
    return [
        [[float(coord) for coord in ctrlpts2d[u][v]] for u in range(size_u)]
        for v in range(size_v)
    ]
def lerp(x, from_, to):
    """Linear interpolates a value using the `x` given and ``(x,y)`` pairs `from_` and `to`.

    All x values must be numbers (have `-` and `/` defined).
    The y values can either be single numbers, or sequences of the same length.
    In the latter case each dimension is interpolated linearly and the
    output is a sequence of the same type.
    """
    x0, x1 = from_[0], to[0]
    y0, y1 = from_[1], to[1]
    if x0 == x1:
        return y0  # degenerate case: avoid dividing by zero
    frac = (x - x0) / float(x1 - x0)
    try:
        # Sequence case: interpolate per dimension, keep the input's type.
        interped = [(b - a) * frac + a for a, b in zip(y0, y1)]
        return type(y0)(interped)
    except TypeError:
        # Scalar case.
        return (y1 - y0) * frac + y0
import itertools
def pairs(list1, list2):
    """
    :param list1: list of elements
    :param list2: list of elements
    :return: ordered pairs (a, b) from the cross product with a != b
    """
    return [pair for pair in itertools.product(list1, list2)
            if pair[0] != pair[1]]
def make_goal(nb_columns: int = 3) -> str:
    """
    Define the goal expressed in LDLf logic.
    E.g. for nb_columns = 3:
    <(!c0 & !c1 & !c2)*;c0;(!c0 & !c1 & !c2)*;c1;(!c0 & !c1 & !c2)*;c2>tt
    :param nb_columns: the number of column
    :return: the string associated with the goal.
    """
    labels = ["c{}".format(i) for i in range(nb_columns)]
    none_active = "(!" + " & !".join(labels) + ")"
    sequence = (";" + none_active + "*;").join(labels)
    return "<" + none_active + "*;" + sequence + ">tt"
def _get_from_f(word: dict, key: str):
"""HELPER: extracts the key from the word dictionary
:param word: dictionary from the json word
:type word: dict
:param key: key that needed to be found in the f dictionary that in word
:type key: str
:return: value of key in the f key in word dictionary
:rtype: str
"""
try:
if "f" in word:
if key in word["f"]:
return word["f"][key]
except TypeError:
pass | 62262ad9bd2bd592bab193c88d5d0d206d781943 | 683,220 |
import base64
def save_b64_image(base64_string):
    """Decode a b64-encoded image and save it locally to disk
    for display in the Monitoring Station GUI.

    Args:
        base64_string (str): encoded image string
    Returns:
        (str): filename of the decoded image
    """
    filename = "temp-img.jpg"
    with open(filename, "wb") as out_file:
        out_file.write(base64.b64decode(base64_string))
    return filename
def epc_calc_common_mode(reg_dict):
    """
    Report whether common mode is enabled.

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information
    Returns
    ----------
    bool
        True when num_dcs[2] == 0 and mod_sel[2] == 3, False otherwise
    """
    if reg_dict["num_dcs"][2] != 0:
        return False
    return reg_dict["mod_sel"][2] == 3
def get_item(tbl: dict, key):
    """
    Look up *key* in *tbl*; when absent, treat an integer key as a
    positional index into the dict's values.

    :param tbl: The dictionary to look in
    :param key: The key, or integer position to get the item of
    :return: The item, or None if not found
    """
    if key in tbl:
        return tbl[key]
    if not isinstance(key, int):
        return None
    # Integer key by this point: range-check, then walk positionally.
    if key < 0 or key >= len(tbl):
        return None
    for position, value in enumerate(tbl.values()):
        if position == key:
            return value
    # Unreachable: the range check above guarantees a positional hit.
    raise ValueError("Shouldn't happen in anp.get_item")
import collections
def _list_to_dicts(list_of_values, keys):
"""Restores a list of dicts from a list created by `_dicts_to_list`.
`keys` must be the same as what was used in `_dicts_to_list` to create the
list. This is used to restore the original dicts inside `host_call_fn` and
`metrics_fn`.
Transforms a list to a list of dicts.
Args:
list_of_values: (list) a list of values. The length must a multiple of the
length of keys.
keys: (list) a list of keys.
Returns:
(list) a list of dicts.
"""
num_dicts = len(list_of_values) // len(keys)
list_of_dicts = [collections.OrderedDict() for i in range(num_dicts)]
for i, key in enumerate(keys):
for j in range(num_dicts):
list_of_dicts[j][key] = list_of_values[i * num_dicts + j]
return list_of_dicts | e32592187866140d3cac152c5402e4645d2e3e1d | 683,230 |
def rotate270_augment(aug=None, is_training=True, **kwargs):
    """Rotation by 270 degree augmentation (appended only during training)."""
    del kwargs
    base = [] if aug is None else aug
    if not is_training:
        return base
    return base + [('rotate270', {})]
def create_deets_message(time, size, image):
    """Create the image-details message shown in the GUI client.

    The timestamp is labelled "processed" for inverted images and
    "uploaded" otherwise; the size is reported in pixels.

    Args:
        time (str): timestamp of upload/processing
        size (tuple): width, height of image in pixels
        image (str): name of image
    Returns:
        str: message to be shown to user
    """
    time_type = "processed" if "inverted" in image else "uploaded"
    width, height = size
    return "Time {}: {}\nImage size: {} x {}".format(time_type, time, width, height)
def _get_venue_storage(districts):
"""Initializes a dict for storing venues, organized by district.
"""
# This ensures districts are included even if they have no rated venues.
res = {district['name']: [] for district in districts}
res[None] = [] # Hack: avoids exception if venue has no district.
return res | 8ab3ea6cbdc5e329e878361fc964c0b39687634a | 683,235 |
import re
def create_form_data(data):
    """
    Return a copy of *data* with every key converted from camelCaseFormat
    to underscore_format.
    """
    def to_underscore(name):
        return re.sub("([A-Z])", lambda m: "_" + m.group(1).lower(), name)
    return {to_underscore(key): value for key, value in data.items()}
def find_my_friend(queue: list, friend_name: str) -> int:
    """
    :param queue: list - names in the queue.
    :param friend_name: str - name of friend to find.
    :return: int - index at which the friend's name was found, or -1.
    """
    try:
        return queue.index(friend_name)
    except ValueError:
        return -1
import networkx as nx
def OeMolToGraph(oemol):
    """
    Convert a charged molecule to a networkX graph with WibergBondOrder as
    edge weight.

    Parameters
    ----------
    oemol: charged OEMolGraph
    Returns
    -------
    G: NetworkX Graph of molecule
    """
    G = nx.Graph()
    for atom in oemol.GetAtoms():
        G.add_node(atom.GetIdx(), name=atom.GetName())
    for bond in oemol.GetBonds():
        # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # only a missing 'fgroup' tag should fall back to False.
        try:
            fgroup = bond.GetData('fgroup')
        except Exception:
            fgroup = False
        G.add_edge(bond.GetBgnIdx(), bond.GetEndIdx(), weight=bond.GetData("WibergBondOrder"), index=bond.GetIdx(),
                   aromatic=bond.IsAromatic(), in_ring=bond.IsInRing(), fgroup=fgroup)
    return G
import pathlib
def return_file_extension(_path):
    """Return the file extension of *_path* as a string. Example: .txt; .py"""
    suffix = pathlib.Path(_path).suffix
    return suffix
def read_csv(filepath):
    """
    Simple CSV reader function required for blast function to output
    results as a dictionary.

    Input
    -----
    filepath = str, path to the headerless CSV file containing blast results
    Output
    ------
    dictionary = dict, keys = column names, values = columns.
                 Contains 5 str, 8 int, and 1 float columns.
    """
    with open(filepath, "r") as f:
        # Column names (the file itself is assumed to carry no header row)
        header = ['QueryId', 'TargetId', 'QueryMatchStart', 'QueryMatchEnd',
                  'TargetMatchStart', 'TargetMatchEnd', 'QueryMatchSeq',
                  'TargetMatchSeq', 'NumColumns', 'NumMatches',
                  'NumMismatches', 'NumGaps', 'Identity', 'Alignment']
        # Rest of the results as a list of lists (one list per row)
        data = [line.strip().split(",") for line in f.readlines()]
        # Transposing the list of lists: rows -> columns
        data = list(map(list, zip(*data)))
        # Converting type for the int columns (match coordinates and counts)
        intIndices = [2, 3, 4, 5, 8, 9, 10, 11]
        data = [
            list(map(int, column)) if i in intIndices else column
            for i, column in enumerate(data)
        ]
        # 'Identity' is the only float column
        floatIndices = [12]
        data = [
            list(map(float, column)) if i in floatIndices else column
            for i, column in enumerate(data)
        ]
        # Creating the dictionary: column name -> column values
        dictionary = dict(zip(header, data))
    return dictionary
def array_find(arr, obj) -> int:
    """Return the index of *obj* in *arr*, or -1 when it is absent.

    Args:
        arr : the array to be searched
        obj : the object whose index is to be found.
    Returns:
        int: The index of obj in the array; -1 if the object is not present.
    """
    try:
        return arr.index(obj)
    except ValueError:
        # Fix: the original bare `except:` also hid real bugs such as an
        # AttributeError on a non-list argument; only "not found" maps to -1.
        return -1
def __calculate_waterfootprint(wf_ing, quantity):
"""
Calculate the right water footprint of a ingredient from its
(l/kg) water footprint and the quantity provided (in gr).
:param wf_ing: the water footprint of the ingredient.
:param quantity: the quantity of the ingredient.
:return: the water footprint calcuated on the quantity.
"""
return round((wf_ing * quantity) / 1000, 2) | eb675f718dfdf619cf874c8a1fb2cca2f55ed0a5 | 683,249 |
def _is_extended_mux_needed(messages):
"""Check for messages with more than one mux signal or signals with more than one multiplexer value."""
for message in messages:
multiplexers = [
signal.name
for signal in message.signals
if signal.is_multiplexer
]
if len(multiplexers) > 1:
return True
for signal in message.signals:
if signal.multiplexer_ids:
if len(signal.multiplexer_ids) > 1:
return True
return False | 61e5c93136a47fcf636da753e3e94304a9e57963 | 683,251 |
def parse_int(arg):
    """
    Parse an integer of an unknown base.

    The supported bases are 2, 8, 10 and 16, selected by the 0b/0o/0x
    prefixes (case-insensitive).  An optional leading sign and surrounding
    whitespace are allowed; the original rejected e.g. "-0x10".
    """
    arg = arg.lower().strip()
    # Look past a sign character when detecting the base prefix.
    digits = arg[1:] if arg[:1] in ('+', '-') else arg
    if digits.startswith('0x'):
        base = 16
    elif digits.startswith('0b'):
        base = 2
    elif digits.startswith('0o'):
        base = 8
    else:
        base = 10
    return int(arg, base)
def _combine_external_inputs_with_precursor_nodes(node, external_inputs):
"""
User_provided_input_nodes.
Args:
node (OnnxGraphNode): Node instance.
external_inputs (list[str]): Inputs in onnx ir.
Returns:
list[str], precursor nodes list.
"""
inputs = set(node.ir_node_inputs)
to_be_added = list(inputs & set(external_inputs))
precursor = node.ir_node_precursor
# Add external inputs to precursor as the order of its inputs.
for item in to_be_added:
node_idx = node.ir_node_inputs.index(item)
precursor.insert(node_idx, item)
return precursor | 1d24dee3a1829f6b69f6863c0a74236de4dd8124 | 683,254 |
import random
import string
def generate_name(length=8):  # type: (int) -> str
    """Generate and return a random alphanumeric name of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
import re
def check_mixed_digits(value):
    """
    Return True when *value* contains both letters and digits.

    :param value: String to be checked
    :type value: String
    :rtype: Boolean

    Examples:
        >>> check_mixed_digits('123 456')
        False
        >>> check_mixed_digits('abcd 123')
        True
    """
    has_letter = re.search('[a-zA-Z]', value) is not None
    return has_letter and re.search(r'\d+', value) is not None
def file_fzp_start(filename):
    """
    Find the start of the fzp data.

    :param filename: string of the fzp file name
    :return: number of lines to skip at top of file
    :raises ValueError: when no 'VehNr;' header line is present
    """
    with open(filename) as in_f:
        c = 0
        # find start of VISSIM data
        line = in_f.readline()
        while 'VehNr;' not in line:
            if line == '':
                # EOF: the original spun forever here on a file without
                # a 'VehNr;' header (readline keeps returning '').
                raise ValueError('no VehNr; header found in ' + filename)
            line = in_f.readline()
            c += 1
    return c
def emphasis_sub(match):
    """Substitutes <strong>, <em>, and <strong><em> tags."""
    level = len(match.group(1))
    content = match.group(2)
    tags = {
        1: ('<em>', '</em>'),
        2: ('<strong>', '</strong>'),
        3: ('<strong><em>', '</em></strong>'),
    }
    if level in tags:
        open_tag, close_tag = tags[level]
        return open_tag + content + close_tag
import math
def calculate_distance(coord1, coord2, box_length=None):
"""
Calculate the distance between two 3D coordinates.
Parameters
----------
coord1, coord2: list
The atomic coordinates
Returns
-------
distance: float
The distance between the two points.
"""
distance = 0
for i in range(3):
dim_dist = (coord1[i] - coord2[i])
if box_length:
dim_dist = dim_dist - box_length * round(dim_dist / box_length)
dim_dist = dim_dist**2
distance += dim_dist
distance = math.sqrt(distance)
return distance | a43eb15406ea4eaf3c59bb27953b3d55f166a037 | 683,273 |
import re
def isurl(value):
    """
    Return whether or not given value is an URL.
    If the value is an URL, this function returns ``True``, otherwise ``False``.
    Examples::
        >>> isurl('http://foo.bar#com')
        True
        >>> isurl('http://foobar.c_o_m')
        False
    :param value: string to validate URL
    """
    # Regex patterns for validating URL is taken from
    # Django's URLValidator class
    ul = '\u00a1-\uffff'  # unicode letters range (for IDN hostnames)
    # IP patterns
    ipv4_re = r'(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)(?:\.(?:25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'
    ipv6_re = r'\[[0-9a-f:\.]+\]'
    # Host patterns: label, optional dotted sub-labels, then the TLD
    hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?'
    domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*'
    tld_re = r'\.(?!-)(?:[a-z' + ul + '-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\.?'  # may have a trailing dot
    host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)'
    url = re.compile(r'^(ftp|tcp|rtmp|udp|wss?|https?)://(?:\S+(?::\S*)?@)?(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')(?::\d{2,5})?(?:[/?#][^\s]*)?\Z', re.IGNORECASE)
    # 2083 is the historical IE URL length limit; <= 3 cannot hold a scheme
    if value == '' or len(value) >= 2083 or len(value) <= 3:
        return False
    return bool(url.match(value))
def longest_common_subsequence(x, y):
    """Longest common subsequence via dynamic programming.

    :param x:
    :param y: x, y are lists or strings
    :returns: longest common subsequence in form of a string
    :complexity: `O(|x|*|y|)`
    """
    len_x, len_y = len(x), len(y)
    # -- fill the DP table of optimal subsequence lengths
    table = [[0] * (len_y + 1) for _ in range(len_x + 1)]
    for i in range(len_x):
        for j in range(len_y):
            if x[i] == y[j]:
                table[i + 1][j + 1] = table[i][j] + 1
            else:
                table[i + 1][j + 1] = max(table[i][j + 1], table[i + 1][j])
    # -- walk back through the table to extract one solution
    chars = []
    i, j = len_x, len_y
    while table[i][j] > 0:
        if table[i][j] == table[i - 1][j]:
            i -= 1
        elif table[i][j] == table[i][j - 1]:
            j -= 1
        else:
            i -= 1
            j -= 1
            chars.append(x[i])
    return ''.join(reversed(chars))
import torch
def get_charges(node):
    """ Solve the function to get the absolute charges of atoms in a
    molecule from parameters.
    Parameters
    ----------
    node : graph node whose ``data`` dict carries per-atom tensors under
        the keys 'e', 's', 'sum_e_s_inv', 'sum_s_inv' and 'sum_q'
        (the last three are per-molecule aggregates broadcast onto nodes
        -- presumably produced by an upstream message-passing step; TODO
        confirm against the caller).
    e : tf.Tensor, dtype = tf.float32,
        electronegativity.
    s : tf.Tensor, dtype = tf.float32,
        hardness.
    Q : tf.Tensor, dtype = tf.float32, shape=(),
        total charge of a molecule.
    Returns
    -------
    dict with key 'q_hat' mapping to the per-atom charge tensor.
    We use Lagrange multipliers to analytically give the solution.
    $$
    U({\bf q})
    &= \sum_{i=1}^N \left[ e_i q_i + \frac{1}{2} s_i q_i^2\right]
    - \lambda \, \left( \sum_{j=1}^N q_j - Q \right) \\
    &= \sum_{i=1}^N \left[
    (e_i - \lambda) q_i + \frac{1}{2} s_i q_i^2 \right
    ] + Q
    $$
    This gives us:
    $$
    q_i^*
    &= - e_i s_i^{-1}
    + \lambda s_i^{-1} \\
    &= - e_i s_i^{-1}
    + s_i^{-1} \frac{
    Q +
    \sum\limits_{i=1}^N e_i \, s_i^{-1}
    }{\sum\limits_{j=1}^N s_j^{-1}}
    $$
    """
    e = node.data['e']
    s = node.data['s']
    sum_e_s_inv = node.data['sum_e_s_inv']
    sum_s_inv = node.data['sum_s_inv']
    sum_q = node.data['sum_q']
    # Closed-form solution of the Lagrangian stationarity condition above.
    return {'q_hat': -e * s ** -1 + (s ** -1) * torch.div(sum_q + sum_e_s_inv, sum_s_inv)}
import re
def RemoveReportHeaderAndFooter(output):
    """Removes Google Test result report's header and footer from the output."""
    # Each pattern strips one kind of report line; order matters because the
    # earlier patterns may consume lines the later ones would also match.
    patterns = [
        r'.*gtest_main.*\n',              # "Running main() from gtest_main.cc"
        r'\[.*\d+ tests.*\n',             # test-count banner lines
        r'\[.* test environment .*\n',    # environment set-up/tear-down lines
        r'\[=+\] \d+ tests .* ran.*',     # summary line (may lack a newline)
        r'.* FAILED TESTS\n',             # failure footer
    ]
    for pattern in patterns:
        output = re.sub(pattern, '', output)
    return output
def cgi_decode(s):
    """Decode the CGI-encoded string `s`:
      * replace "+" by " "
      * replace "%xx" by the character with hex number xx.
    Return the decoded string. Raise `ValueError` for invalid inputs,
    including a "%" escape that is truncated at the end of the string
    (previously this raised an undocumented IndexError)."""
    # Mapping of hex digits to their integer values
    hex_values = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
        '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
        'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15,
        'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15,
    }
    t = ""
    i = 0
    while i < len(s):
        c = s[i]
        if c == '+':
            t += ' '
        elif c == '%':
            # A truncated escape ("%" or "%x" at the end of the string) is
            # invalid input, not an IndexError.
            if i + 2 >= len(s):
                raise ValueError("Invalid encoding")
            digit_high, digit_low = s[i + 1], s[i + 2]
            i += 2
            if digit_high in hex_values and digit_low in hex_values:
                v = hex_values[digit_high] * 16 + hex_values[digit_low]
                t += chr(v)
            else:
                raise ValueError("Invalid encoding")
        else:
            t += c
        i += 1
    return t
def should_apply_max_and_skip_env(hparams):
    """MaxAndSkipEnv doesn't make sense for some games, so omit it if needed.

    Returns False only for tic-tac-toe; every other game gets frame skipping.
    """
    return not (hparams.game == "tictactoe")
from pathlib import Path
def norm_abs_path(path, ref_path):
    """\
    Convert `path` to absolute, assuming it is relative to `ref_path`
    (which may name a file or a directory).
    """
    target = Path(path)
    anchor = Path(ref_path).absolute()
    # Path.is_dir() is False for files and for nonexistent paths; in either
    # case anchor on the parent directory.
    if not anchor.is_dir():
        anchor = anchor.parent
    return anchor / target
from typing import List
def mako_local_import_str(package_name: str, key: str, imports: List[str]) -> str:
    """
    Create an import string for a mako template from the output_patterns.yml
    file, like
    `from docassemble.playground1.interview_generator import mako_indent, varname`
    """
    names = ",".join(imports)
    return f"from {package_name}.{key} import {names}"
def get_user_choice(choices, choice_type, default_value=""):
    """
    A common method to take user choice from a list of choices
    Args:
        (list) choices - list of choices
        (str) choice_type - Type of choice (heading shown to the user)
        (str) default_value - Value returned on invalid input (if non-empty)
    Returns:
        (str) - User selected choice
    """
    print("\n\n{}\n".format(choice_type))
    # Display the choices to user
    for i in range(len(choices)):
        print("{}. {}".format(i + 1, choices[i].capitalize()))
    # User input for number for choice selection
    while True:
        try:
            # Input prompt for Choice
            choice = input("\nEnter your choice: ")
            # Convert input text in lowercase
            choice_lower = choice.lower()
            # Check if choice text match in list
            if choice_lower in choices:
                # Display corrected selection
                print("\nYou selected: {}".format(choice_lower.capitalize()))
                return choice_lower
            elif choice_lower.isdigit() and 1 <= int(choice_lower) <= len(choices):
                # Numeric selection: only 1..len(choices) is accepted, so "0"
                # no longer wraps around to the last item via negative
                # indexing, and out-of-range numbers no longer rely on a
                # swallowed IndexError.
                choice_value = choices[int(choice_lower) - 1]
                # Display corrected selection
                print("\nYou selected: {}".format(choice_value.capitalize()))
                return choice_value
            else:
                if len(default_value) > 0:
                    # Check for default value, if yes return that in wrong input event
                    return default_value
                else:
                    # Display input error message
                    print("\nPlease enter a valid choice (text or number).")
        except KeyboardInterrupt:
            # Keyboard Interrupt need to handled so program can exit properly
            print("Exiting..")
            # Make sure we are passing the exception to chain
            raise
        except Exception:
            # Explicit Exception (not a bare ``except:``) so SystemExit and
            # other system-exiting exceptions still propagate; this keeps the
            # original best-effort behaviour for e.g. EOFError from input().
            if len(default_value) > 0:
                # Check for default value, if yes return that in wrong input event
                return default_value
            else:
                print("\nPlease enter a valid choice (1,2,3,...) or text.")
def partTypeNum(partType):
    """ Mapping between common names and numeric particle types. """
    name = str(partType)
    if name.isdigit():
        return int(name)
    name = name.lower()
    # Alias table; both stars and wind map to 4 (they are distinguished
    # elsewhere by the sign of GFM_StellarFormationTime).
    aliases = {
        0: ('gas', 'cells'),
        1: ('dm', 'darkmatter'),
        3: ('tracer', 'tracers', 'tracermc', 'trmc'),
        4: ('star', 'stars', 'stellar', 'wind'),
        5: ('bh', 'bhs', 'blackhole', 'blackholes'),
    }
    for num, names in aliases.items():
        if name in names:
            return num
    raise Exception("Unknown particle type name.")
import itertools
def _split_list(lst, delim):
"""
리스트를 delimiter로 split하는 함수
>>> _split_list(['가/JKS', '_', '너/NP'], '_')
[['가/JKS'], ['너/NP']]
Args:
lst: 리스트
delim: delimiter
Returns:
list of sublists
"""
sublists = []
while lst:
prefix = [x for x in itertools.takewhile(lambda x: x != delim, lst)]
sublists.append(prefix)
lst = lst[len(prefix):]
delims = [x for x in itertools.takewhile(lambda x: x == delim, lst)]
lst = lst[len(delims):]
return sublists | 9d10aa325109227e512bf30f5b04738b718cb11d | 683,298 |
def save_omas_dx(odx, filename):
    """
    Save an ODX's xarray dataset to file (netCDF format)

    :param odx: OMAS data xarray
    :param filename: filename or file descriptor to save to
    :return: whatever ``Dataset.to_netcdf`` returns
    """
    dataset = odx.omas_data
    return dataset.to_netcdf(filename)
def lremove(s, prefix):
    """Remove `prefix` from the start of string `s`, if present."""
    if s.startswith(prefix):
        return s[len(prefix):]
    return s
import requests
from bs4 import BeautifulSoup
def prelimSeeds(URL: str) -> dict:
    """Parses the prelim seeds page of a tournament
    Args:
        URL (str): the URL of the prelim seeds page of a certain division
    Returns:
        dict: a dict containing the parsed prelim data
    RETURN SCHEMA:
    {
        <(str) team code> : [
            <(int) prelim rank>,
            <(int) total # of prelim teams>
        ],
        ...
    }
    """
    data = {}
    # Getting page and setting up parser
    r = requests.get(URL)
    soup = BeautifulSoup(r.text, "html.parser")
    # Getting all rows except for the first header
    rawData = soup.find_all("tr")
    rawData = rawData[1:len(rawData)]
    # Getting number of entries
    numEntries = len(rawData)
    # NOTE(review): the loop variable ``r`` shadows the HTTP response bound
    # above; harmless since the response is no longer used, but worth renaming.
    for r, element in enumerate(rawData, start = 1):
        rawEntryData = element.find_all("td")
        textData = []
        for node in rawEntryData:
            # Cell text split on newlines; index [1] below assumes each cell's
            # text begins with a newline -- TODO confirm against the page markup.
            nodeText = node.get_text().replace('\t','').split('\n')
            textData.append(nodeText)
        try:
            pos = int(textData[0][1])
            code = textData[1][1]
            data[code] = [pos, numEntries]
        except Exception as e:
            # Best-effort: malformed rows are printed and skipped.
            print(e)
    return data | e508e4adf51392e5ed69e656db9dd9df92a836ab | 683,306
def get_bbox_inside_image(label_bbox: list, image_bbox: list) -> list:
    """
    Clip label_bbox so that all of its points lie inside image_bbox.
    Returns the corrected bbox as [x_min, y_min, x_max, y_max].
    """
    return [
        max(label_bbox[0], image_bbox[0]),  # left edge
        max(label_bbox[1], image_bbox[1]),  # top edge
        min(label_bbox[2], image_bbox[2]),  # right edge
        min(label_bbox[3], image_bbox[3]),  # bottom edge
    ]
def date_to_string(date, granularity=0):
    """ Convert a date to a string, with an appropriate level of
    granularity.

    NOTE(review): the original docstring described granularity 0 as the most
    precise ("date and time") and 3 as the coarsest ("only year"), but the
    code below does the opposite -- detail *increases* with the value, and the
    full date+time format is the fallback for any value >= 3.  The actual
    behaviour is documented here; confirm with callers which was intended.

    :param date: A datetime object (a falsy value yields "").
    :param granularity: Level of detail of the textual representation.
        0: week day only (e.g. "Monday")
        1: week day and day of month (e.g. "Monday, 05")
        2: week day, day and month (e.g. "Monday, 05 March")
        anything else: week day, day, month and time
    :return: A textual representation of the date.
    """
    if not date:
        return ""
    if granularity == 0:
        return date.strftime("%A")
    elif granularity == 1:
        return date.strftime("%A, %d")
    elif granularity == 2:
        return date.strftime("%A, %d %B")
    # %H is 24-hour format, yet %p appends "AM"/"PM" -- unusual combination;
    # verify the intended time format.
    return date.strftime("%A, %d %B, %H:%M%p") | 8c560f4dd8dd6508e7ddf9aa7b6b649ab13a6008 | 683,309
def fetch_ref_codon(ref_pos, curr_gene, curr_seq):
    """ Fetch the codon containing a given site within a gene.

    Args:
        ref_pos: position of the site on the reference
        curr_gene: dict with 'start', 'end' and 'strand' keys
        curr_seq: gene sequence (oriented start to stop)

    Returns:
        (codon, position-of-site-within-codon) tuple
    """
    # Offset of the site from the gene's first base, following the strand.
    if curr_gene['strand'] == '+':
        within_gene_pos = ref_pos - curr_gene['start']
    else:
        within_gene_pos = curr_gene['end'] - ref_pos
    within_codon_pos = within_gene_pos % 3
    codon_start = within_gene_pos - within_codon_pos
    ref_codon = curr_seq[codon_start:codon_start + 3]
    return ref_codon, within_codon_pos
import numbers
def _scale(scale):
""" Given a numeric input, return a 2-tuple with the number repeated.
Given a 2-tuple input, return the input
>>> _scale(2)
(2, 2)
>>> _scale((1, 2,))
(1, 2)
>>> _scale('nonsense')
Traceback (most recent call last):
...
TypeError: argument should be a number or a tuple
>>> _scale((1,2,3))
Traceback (most recent call last):
...
ValueError: scale should be a 2-tuple
"""
if isinstance(scale, tuple):
if len(scale) != 2:
raise ValueError('scale should be a 2-tuple')
return scale
elif isinstance(scale, numbers.Real):
return (scale, scale)
else:
raise TypeError('argument should be a number or a tuple') | ac3bda11c58016b7a1db32ed47aa5d0a8e120b1c | 683,313 |
def checkValid(s, row, col):
    """
    Return True if the cell at (row, col) is valid in a Sudoku puzzle,
    False otherwise.  A cell is valid when its value does not reappear in
    the same row, the same column, or the same 3x3 block (empty cells,
    stored as 0, are ignored).
    """
    value = s[row][col]
    # Scan the cell's row and column, skipping blanks and the cell itself.
    for k in range(9):
        if k != col and s[row][k] != 0 and s[row][k] == value:
            return False
        if k != row and s[k][col] != 0 and s[k][col] == value:
            return False
    # Scan the enclosing 3x3 block.  Cells sharing the row or column with
    # (row, col) are excluded here; those were already covered above.
    base_row, base_col = 3 * (row // 3), 3 * (col // 3)
    for r in range(base_row, base_row + 3):
        for c in range(base_col, base_col + 3):
            if s[r][c] != 0 and r != row and c != col and s[r][c] == value:
                return False
    return True
from typing import Any
def _len(x: Any) -> int:
    """Return max(1, len(x)) if x is a list, else 0.

    NOTE(review): the original docstring claimed "len of x if it is iterable,
    else 0", but the code (a) only special-cases ``list`` -- strings, tuples
    and other iterables yield 0 -- and (b) clamps the result to at least 1,
    so an empty list yields 1 rather than 0.  Confirm with callers whether
    that is intended before changing either side.
    """
    return max(1, len(x)) if isinstance(x, list) else 0 | a79dfef8014152de222135fb65ce0e95d32d1489 | 683,324
def timedelta_to_seconds(td):
    """
    Converts a timedelta to total seconds, including support for microseconds.
    Return value is a (potentially truncated) integer, or None when ``td``
    is None.
    (This is built-in in Python >= 2.7, but we are still supporting Python 2.6 here.)
    :param td: The timedelta object
    :type td: :class:`datetime.timedelta`
    :return: The number of total seconds in the timedelta object.
    :rtype: int
    """
    if td is None:
        # Only None maps to None.  The previous ``if not td`` check also
        # returned None for a zero-length timedelta, which should yield 0.
        return None
    # Floor division keeps the documented integer result; plain ``/`` returns
    # a float under Python 3.
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) // 10**6
def luhn_checksum(num: str) -> str:
    """Calculate a check digit for num using the Luhn algorithm.

    :param num: The number to calculate a checksum for, as a string.
    :return: Check digit for the number, as a single-character string.
    """
    total = 0
    # Walk the digits right-to-left, doubling every second one starting
    # with the rightmost; digits above 9 have 9 subtracted (digit sum).
    for i, ch in enumerate(reversed(num)):
        digit = int(ch)
        if i % 2 == 0:
            digit *= 2
        if digit > 9:
            digit -= 9
        total += digit
    return str(total * 9 % 10)
def test_chess_cell(x, y):
    """
    Source https://pythontutor.ru/lessons/ifelse/problems/chess_board/

    Condition
    Two checkerboard squares are set. If they are painted the same color,
    print the word YES, and if in different colors - then NO.
    The program receives four numbers from 1 to 8 each, specifying the column
    number and line number first for the first cell, then for the second cell.

    Returns True when the two coordinates have the same parity (same color),
    False otherwise.
    """
    x_odd = x % 2 != 0
    y_odd = y % 2 != 0
    # Same color exactly when both coordinates share parity.
    return x_odd == y_odd
import re
def parse_description(dsc):
    """
    Parse the given string into a hash.
    The string format is `key: value`,
    where key gets converted to upper case
    and value extends until a new line.
    A special last field `Abstract:` extends until the end of string.

    :param dsc: description text containing an ``abstract:`` marker
    :return: dict of upper-cased keys to values, plus an ``'ABSTRACT'`` key
    :raises ValueError: if no ``abstract:`` marker is present
    """
    # maxsplit=1 so an "abstract:" occurring inside the abstract body no
    # longer breaks the unpacking below; raw strings avoid invalid-escape
    # warnings, and maxsplit is passed by keyword (the positional form is
    # deprecated in recent Python versions).
    meta, abstract = re.split(r'abstract:\s*\n*', dsc, maxsplit=1, flags=re.I)
    ret = {'ABSTRACT': abstract}
    for line in meta.splitlines():
        if ':' in line:
            key, value = re.split(r'\s*:\s*', line, maxsplit=1)
            key = key.upper()
            ret[key] = value
    return ret
def decode(packet):
    """
    Decode an Oregon Scientific model 6701 temperature/humidity packet.

    https://raw.githubusercontent.com/telldus/telldus/master/telldus-core/service/ProtocolOregon.cpp
    >>> decode(dict(data=0x201F242450443BDD, model=6701))["data"]["temp"]
    24.2
    >>> decode(dict(data=0x201F242450443BDD, model=6701))["data"]["humidity"]
    45.0
    """
    if packet["model"] != 6701:
        raise NotImplementedError(
            "The Oregon model %i is not implemented." % packet["model"]
        )
    data = packet["data"]
    value = int(data)
    # Fields are unpacked byte-by-byte from the low end; the right shifts
    # below are order-critical.
    value >>= 8  # drop the trailing byte (not part of the decoded fields)
    checksum1 = value & 0xFF  # checksum byte as transmitted
    value >>= 8
    # Running checksum: sum of the data nibbles, accumulated per byte.
    checksum = ((value >> 4) & 0xF) + (value & 0xF)
    hum1 = value & 0xF  # humidity, tens digit
    value >>= 8
    checksum += ((value >> 4) & 0xF) + (value & 0xF)
    neg = value & (1 << 3)  # temperature sign flag
    hum2 = (value >> 4) & 0xF  # humidity, ones digit
    value >>= 8
    checksum += ((value >> 4) & 0xF) + (value & 0xF)
    temp2 = value & 0xF  # temperature, ones digit
    temp1 = (value >> 4) & 0xF  # temperature, tens digit
    value >>= 8
    checksum += ((value >> 4) & 0xF) + (value & 0xF)
    temp3 = (value >> 4) & 0xF  # temperature, tenths digit
    value >>= 8
    checksum += ((value >> 4) & 0xF) + (value & 0xF)
    address = value & 0xFF  # sensor address / id
    value >>= 8
    checksum += ((value >> 4) & 0xF) + (value & 0xF)
    # Constant correction term; presumably accounts for fixed preamble
    # nibbles of the protocol -- confirm against the telldus reference above.
    checksum += 0x1 + 0xA + 0x2 + 0xD - 0xA
    if checksum != checksum1:
        raise ValueError(
            "The checksum in the Oregon packet does not match "
            "the caluclated one!"
        )
    temperature = ((temp1 * 100) + (temp2 * 10) + temp3) / 10.0
    if neg:
        temperature = -temperature
    humidity = (hum1 * 10.0) + hum2
    # Return a copy of the input packet with sensorId and decoded data added.
    return dict(
        packet,
        sensorId=address,
        data=dict(temp=temperature, humidity=humidity),
    ) | 7b2030d4ae9a6f5773f7cd3aa305ba400c02e93a | 683,336
import codecs
def is_ascii_encoding(encoding):
    """Return True if `encoding` (or any of its aliases) names the ASCII codec."""
    try:
        codec = codecs.lookup(encoding)
    except LookupError:
        # Unknown encodings are simply not ASCII.
        return False
    return codec.name == "ascii"
def _find_idx_without_numerical_difference(df, column1, column2, delta, idx=None, equal_nan=False):
"""
Returns indices which have bigger numerical difference than delta.
INPUT:
**df** (DataFrame)
**column1** (str) - name of first column within df to compare.
The values of df[column1] must be numericals.
**column2** (str) - name of second column within df to compare.
The values of df[column2] must be numericals.
**delta** (numerical) - value which defines whether indices are returned or not
OPTIONAL:
**idx** (iterable, None) - list of indices which should be considered only
**equal_nan** (bool, False) - if True, indices are included where at least value in
df[column1] or df[column2] is NaN
OUTPUT:
**index** (pandas.Index) - index within idx where df[column1] and df[column2] deviates by
at least delta or, if equal_na is True, one value is NaN
"""
idx = idx if idx is not None else df.index
idx_isnull = df.index[df[[column1, column2]].isnull().any(axis=1)]
idx_without_null = idx.difference(idx_isnull)
idx_no_delta = idx_without_null[(
df.loc[idx_without_null, column1] -
df.loc[idx_without_null, column2]).abs().values <= delta]
if equal_nan:
return idx.difference(idx_no_delta)
else:
return idx_without_null.difference(idx_no_delta) | 9ed9f34b1b8718ee213fd7b8832e5efe7365f116 | 683,342 |
def getDomainOnly(url):
    """Return the bare domain (last two dot-separated labels) from a url
    url = the url
    """
    labels = url.split('.')
    # The last two labels form the domain; anything after a '/' is a path.
    domain = labels[-2] + '.' + labels[-1]
    return domain.split('/')[0]
import time
def sb_session_login(sb_session, sb_username, sb_password=None):
    """
    login in to sb session using the input credentials.  Checks to see if
    you are already logged in.  If no password is given, the password will be
    requested through the command prompt.
    .. note:: iPython shells will echo your password.  Use a Python command
              shell to not have your password echoed.
    :param sb_session: sciencebase session object
    :type sb_session: sciencebasepy.SbSession
    :param sb_username: sciencebase username, typically your full USGS email
    :type sb_username: string
    :param sb_password: AD password
    :type sb_password: string
    :returns: logged in sciencebasepy.SbSession
    """
    if not sb_session.is_logged_in():
        if sb_password is None:
            # No password given: loginc() prompts for it interactively.
            sb_session.loginc(sb_username)
        else:
            sb_session.login(sb_username, sb_password)
        # Pause after logging in; presumably gives the ScienceBase backend
        # time to establish the session -- confirm whether this is needed.
        time.sleep(5)
    return sb_session | 62b9b605e0537054c09f4caebab66d250df1aaeb | 683,346
def subroutine_type(name):
    """Classify a subroutine by its (case-insensitive) name.

    Returns 'global setup'/'global teardown' for names that are exactly
    'setup'/'teardown', 'test' for names starting with 'test_', 'setup' or
    'teardown' for names containing 'setup_'/'_setup' (resp.
    'teardown_'/'_teardown'), otherwise None.
    """
    lowername = name.lower()
    if lowername == 'setup':
        return 'global setup'
    if lowername == 'teardown':
        return 'global teardown'
    if lowername.startswith('test_'):
        return 'test'
    if 'setup_' in lowername or '_setup' in lowername:
        return 'setup'
    if 'teardown_' in lowername or '_teardown' in lowername:
        return 'teardown'
    return None
def permutation(s):
    """
    Return every permutation of the elements of `s`.

    @s: list of elements, e.g. [1, 2]
    return: list of all permutations, each a list,
            e.g. [[1, 2], [2, 1]]; empty input yields [].
    """
    n = len(s)
    if n == 0:
        return []
    if n == 1:
        return [s]
    head = s[0]
    # Insert the head element at every position of each permutation
    # of the remaining elements.
    return [tail[:i] + [head] + tail[i:]
            for tail in permutation(s[1:])
            for i in range(n)]
import torch
def rot90(input_, k=1, axes=(0, 1)):
    """Wrapper of `torch.rot90` operating on a DTensor's dense payload.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor wrapper; its ``_data`` tensor is rotated.
    k : int, optional
        Number of 90-degree rotations, by default 1
    axes : tuple, optional
        The pair of axes defining the rotation plane, by default (0, 1)
    """
    dense = input_._data
    return torch.rot90(dense, k=k, dims=axes)
import torch
def get_optimizer(model, learning_rate, optimizer_state_dict=None, verbose=False):
    """
    Returns an ADAM optimizer attached to the given model
    (and stored on the same device).
    If optimizer_state_dict is not None, it will fill in the optimizer state.
    However, learning_rate will override any saved optimizer state (to allow
    you to adjust during training.)
    """
    if verbose:
        print("Building optimizer... ", end='')
    optimizer = torch.optim.Adam(model.parameters(), learning_rate)
    if optimizer_state_dict:
        optimizer.load_state_dict(optimizer_state_dict)
    # The loaded state may carry an old learning rate; force the requested one.
    for group in optimizer.param_groups:
        group['lr'] = learning_rate
    if verbose:
        print("Done.")
    return optimizer
def inc(x):
    """
    Return a number one greater than x.
    """
    successor = x + 1
    return successor
import requests
def get_latest_release_from_pypi(*, package: str) -> str:
    """Get the latest release of a package on pypi

    :param package: distribution name as it appears on PyPI
    :return: the version string reported by the PyPI JSON API
    """
    # NOTE(review): no timeout and no raise_for_status() here -- a network
    # failure or an unknown package surfaces as a requests/JSON exception or
    # a KeyError from the lookup below.
    response = requests.get(f"https://pypi.org/pypi/{package}/json").json()
    return response["info"]["version"] | 6f24b23f87c765a3e59d87cd4c1deb5414e64b1e | 683,363
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.