content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def print_number_and_permutations(permutations):
    """Return the count of newline-separated combinations followed by the
    original listing itself, joined with a newline.

    :param permutations: newline-separated list of combinations.
    :return: "<count>\n<original text>"
    """
    count = len(permutations.split("\n"))
    return "%s\n%s" % (count, permutations)
def is_white(r, g, b):
    """
    Check if an RGB code is white.

    :param r: Red byte (0-255).
    :param g: Green byte (0-255).
    :param b: Blue byte (0-255).
    :return: True if the pixel is white, False otherwise.
    """
    # A pixel is white only when all three channels are at full intensity.
    # Returning the comparison directly replaces the redundant
    # `if ...: return True / else: return False` pattern.
    return r == 255 and g == 255 and b == 255
def set_simulation(config, components, exclude):
    """Choose which components to simulate, and which parts to exclude."""
    template = """
    simulation:
      components: [{components}]
      exclude: [{exclude}]
    """
    rendered = template.format(components=", ".join(components),
                               exclude=", ".join(exclude))
    # Drop the leading newline introduced by the triple-quoted literal.
    return config + rendered[1:]
import torch
def calc_bbox_iou_matrix(pred: torch.Tensor):
    """
    calculate iou for every pair of boxes in the boxes vector
    :param pred: a 3-dimensional tensor containing all boxes for a batch of images [N, num_boxes, 4], where
    each box format is [x1,y1,x2,y2]
    :return: a 3-dimensional matrix where M_i_j_k is the iou of box j and box k of the i'th image in the batch
    """
    box = pred[:, :, :4] #
    # unsqueeze(1) gives shape [N, 1, B]; the transposed copies below give
    # [N, B, 1], so arithmetic broadcasts to [N, B, B] — every pair (j, k).
    b1_x1, b1_y1 = box[:, :, 0].unsqueeze(1), box[:, :, 1].unsqueeze(1)
    b1_x2, b1_y2 = box[:, :, 2].unsqueeze(1), box[:, :, 3].unsqueeze(1)
    b2_x1 = b1_x1.transpose(2, 1)
    b2_x2 = b1_x2.transpose(2, 1)
    b2_y1 = b1_y1.transpose(2, 1)
    b2_y2 = b1_y2.transpose(2, 1)
    # clamp(0) zeroes out negative overlaps (disjoint boxes).
    intersection_area = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
                        (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    # 1e-16 guards against division by zero for degenerate (zero-area) boxes.
    union_area = (w1 * h1 + 1e-16) + w2 * h2 - intersection_area
    ious = intersection_area / union_area
    return ious | 709fc44f5d9645dc80ef4234d640c5ff53a8b335 | 686,946 |
def get_locations(twitter_data: dict) -> dict:
    """
    Map each user's screen name to their location.

    Users whose ``location`` field is falsy (empty string / None) are skipped.
    """
    return {
        user['screen_name']: user['location']
        for user in twitter_data['users']
        if user['location']
    }
def propagateParents(currentTerm, baseGOid, GOdict, parentSet):
    """
    Propagates through the parent hierarchy of a provided GO term to create a set of all higher order parents.
    Each term's recursive_parents attribute will be filled with all recursively found parent terms.
    Parameters
    ----------
    currentTerm : str
        The GO id that is being visited.
    baseGOid : str
        The original GO term id for which the search for its parents was started.
        Only used for the warning message; it does not affect traversal.
    GOdict : dict
        A dictionary of GO objects generated by importOBO().
        Keys are of the format `GO-0000001` and map to goTerm objects.
    parentSet : set
        An, initially, empty set that gets passed through the recursion.
        It tracks the entire recursive group of parent terms of the original base GO id (i.e. the starting point
        of the function call).
    Returns
    -------
    None
        Updates the parentSet set inplace so that it contains all the (recursive) parents for the baseGOid.
    """
    # If current term has no further parents the recursion will end and move back up the stack,
    # since there are no more parents left to iterate over (because looping through None does nothing).
    # NOTE(review): GOdict.get(currentTerm) returns None for unknown ids,
    # which would raise AttributeError on `.parents` — callers are assumed
    # to pass ids present in GOdict.
    parents = GOdict.get(currentTerm).parents
    # For each parent of the current term under consideration
    for parent in parents:
        # Check if parent is present in GO dictionary
        # This is required because namespace filtering might lead to parent terms
        # that are no longer present as GOterm objects themselves.
        if parent in GOdict:
            # Add current term's parents to growing set
            parentSet.add(parent)
            # and recurse function for each parent
            propagateParents(parent, baseGOid, GOdict, parentSet)
        else:
            # Print a warning that a parent term was reported for the original base term,
            # yet the term is absent from the gene ontology file
            print('WARNING!\n' + parent, 'was defined as a parent for',
                  baseGOid, ', but was not found in the OBO file.\n')
    return None | ac59e11ca95a58f8dd52f62af5679add375ee823 | 686,950 |
import json
def load_config(config_file="config.json"):
    """
    Load configuration information from a .json file.

    In the future:
    The DISCORD_TOKEN should be read from an environment variable and the
    channel ids should be pulled down from webhooks.

    :param config_file: path to the JSON configuration file.
    :return: tuple (token, master_channel, poll_channel).
    :raises KeyError: if any of the three expected keys is missing.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original `json.load(open(...))` leaked the handle until GC.
    with open(config_file) as f:
        conf = json.load(f)
    token = conf["DISCORD_TOKEN"]
    master_channel = conf["MASTER_CHANNEL"]
    poll_channel = conf["POLL_CHANNEL"]
    return token, master_channel, poll_channel
def partition(rows, question):
    """
    Split *rows* into two lists according to *question*.

    Each row for which ``question.match(row)`` is truthy goes into the first
    returned list, every other row into the second.

    PARAMETERS
    ==========
    rows: list
        Rows of the dataset to partition.
    question: object
        Object of the class Question; must expose ``match(row)``.

    RETURNS
    =======
    tuple(list, list)
        (rows matching the question, rows not matching it).
    """
    matches, rest = [], []
    for row in rows:
        bucket = matches if question.match(row) else rest
        bucket.append(row)
    return matches, rest
import torch
import math
def pose_error(R0: torch.Tensor, t0: torch.Tensor, R1: torch.Tensor, t1: torch.Tensor):
    """Compute the rotation and translation error.
    Adapted from PixLoc (author: Paul-Edouard Sarlin) https://psarlin.com/pixloc/
    Args:
        * R0: The [3 x 3] first rotation matrix.
        * t0: The [3] first translation vector.
        * R1: The [3 x 3] second rotation matrix.
        * t1: The [3] second translation vector.
    Returns:
        * The rotation (in degrees) and translation error.
    """
    # Translation error: Euclidean distance between the two vectors.
    dt = torch.norm(t0.ravel() - t1.ravel(), p=2, dim=-1)
    # For rotation matrices, trace(R0^T @ R1) = 1 + 2*cos(theta), where theta
    # is the relative rotation angle; clamp guards against numeric drift
    # pushing the cosine outside [-1, 1].
    trace = torch.diagonal(R0.transpose(-1, -2) @ R1, dim1=-1, dim2=-2).sum(-1)
    cos = torch.clamp((trace - 1) / 2, -1, 1)
    # Convert the angle from radians to degrees.
    dr = torch.acos(cos).abs() / math.pi * 180.0
    return dr, dt | b1079781a3426ded29496bcff02097fb7f0a08ab | 686,964 |
def adl(file_name, is_weight=False, default_weight=0, bonus_key=""):
    """The function returns adjacency list representation of a graph.
    bonus_key if set, will be added to all nested dictionaries.
    Input data format:
    With weights:
    1 2,4 3,111
    2 4,55 5,7
    Output:
    {1: {2: 4, 3: 111}, 2: {4: 55, 5: 7}}
    Without weights:
    1 2 3
    2 4 5
    Output (with the default default_weight=0):
    {1: {2: 0, 3: 0}, 2: {4: 0, 5: 0}}
    It can handle files where edges are in separate lines each:
    1 2
    1 3
    2 4
    2 5
    """
    adj_list = {}
    with open(file_name, 'r') as f:
        for line in f.readlines():
            # Skip blank lines.
            if not line.strip():
                continue
            line = line.strip().split()
            u = line[0]
            if u not in adj_list:
                # Seed each new vertex; bonus_key (when given) is inserted
                # into the nested dict with the default weight.
                adj_list[u] = {bonus_key: default_weight} if bonus_key else {}
            for k in line[1:]:
                if is_weight:
                    x, y = k.split(',')
                else:
                    x = k.split(',')[0]
                    y = default_weight
                # Weights are coerced to int; a non-integer weight raises
                # ValueError here.
                adj_list[u][x] = int(y)
    return adj_list | 69d3cc5cd2acba15bfeb52045fc0996d95c29061 | 686,966 |
import re
def extract_stop_words(body: str) -> set:
    """Parse stop words in a text body.

    Words are the ``\\w+`` tokens found anywhere in the text.

    :param body: the body of the text to parse
    :returns: a set of "stop-words"
    """
    # set(findall(...)) deduplicates the matched tokens directly.
    return set(re.findall(r'\w+', body))
import requests
def request_json(url, **kwargs):
    """Send a GET request to one of the API endpoints that returns JSON.
    Send a GET request to an endpoint, ideally a URL from the urls module.
    The endpoint is formatted with the kwargs passed to it.
    This will error on an invalid request (requests.Request.raise_for_status()), but will otherwise return a dict.
    """
    # str.format fills template placeholders like {id} in the endpoint URL.
    r = requests.get(url.format(**kwargs))
    # Raises requests.HTTPError for 4xx/5xx responses.
    r.raise_for_status()
    return r.json() | d4c2c4693f5820ae39aa6a57bddfe7fdf1928303 | 686,974 |
def str_to_bool(parameter):
    """
    Utility for converting a string to its boolean equivalent.

    Booleans pass through unchanged; recognized true/false spellings
    (case-insensitive) are converted; anything else raises ValueError.
    """
    if isinstance(parameter, bool):
        return parameter
    lowered = parameter.lower()
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    raise ValueError(f'"{parameter}" is not a valid boolean value.')
import re
def parse_vocab_version(typename):
    """Parses a controlled vocabulary version from an instance ``xsi:type``
    value.

    Args:
        typename: The ``xsi:type`` value found on a controlled vocabulary
            instance.

    Returns:
        The version portion of a controlled vocabulary type instance. For
        example, given ``vocabs:IndicatorTypeVocab-1.0``, this would return
        ``'1.0'``.
    """
    # Splitting on ':' and '-' yields [prefix, vocab name, version].
    tokens = re.split(":|-", typename)
    return tokens[2]
from typing import Callable
def is_class_view(handler: Callable) -> bool:
    """
    Judge whether *handler* is the product of a django.views.View subclass.

    Callables produced by ``View.as_view()`` carry a ``view_class``
    attribute; plain function views do not.
    """
    return hasattr(handler, "view_class")
def default_filter(files):
    """Function to filter folders based on content.

    Parameters
    ----------
    files : list
        A list containing strings of filenames in directory

    Returns
    -------
    bool : a flag indicating whether the list contains '1.mkv', '2.mkv'
        and 'Labels.json'
    """
    required = ('1.mkv', '2.mkv', 'Labels.json')
    return all(name in files for name in required)
import re
def bump_version(version, bump='patch'):
    """
    Increases version number.

    :param version: str, must be in version format "int.int.int"
    :param bump: str, one of 'patch, minor, major'
    :returns: version with the given part increased, and all inferior parts reset to 0
    :raises ValueError: if *version* is not "int.int.int"
    :raises KeyError: if *bump* is not one of the three recognized parts
    """
    # Raw string fixes the invalid escape sequence '\d' (a SyntaxWarning on
    # modern Python) in the original pattern.
    match = re.match(r'^(\d+)\.(\d+)\.(\d+)$', version)
    if match is None:
        raise ValueError('Invalid version string, must be int.int.int: "%s"' % version)
    parts = [int(x) for x in match.groups()]
    # Index of the part to bump within [major, minor, patch].
    idx = dict(major=0, minor=1, patch=2)[bump]
    # Increment the desired part ...
    parts[idx] += 1
    # ... and reset every part behind it to 0.
    parts = parts[:idx + 1] + [0] * (len(parts) - idx - 1)
    return '%d.%d.%d' % tuple(parts)
def ordinaryAnnuity(pymt, p, r, c, n):
    """Ordinary annuity formula
    Returns: future value
    Input values:
    pymt : payment made during compounding period
    p : principal
    r : annual interest rate
    c : number of compounding periods in a year
    n : total number of payments
    """
    # FV factor of an ordinary annuity: ((1 + i)^n - 1) / i with i = r/c.
    # NOTE(review): the principal `p` is accepted but never used in the
    # computation — confirm whether a compounded principal term
    # (p * (1 + r/c)**n) was intended to be added to the result.
    block1 = ((1 + (r / c)) ** n) - 1
    block2 = r / c
    fv = pymt * (block1 / block2)
    return fv | 3f31db7d1590d069ee1c69c867ee2285fa753080 | 686,985 |
def ext_euclid(x, y):
    """
    Extended Euclidean algorithm.

    Returns (g, a, b) such that g = gcd(x, y) = ax + by.
    """
    # Base case: gcd(x, 0) = x = (1)x + (0)y.
    if y == 0:
        return (x, 1, 0)
    # Recurse on (y, x mod y): g = a1*y + b1*(x % y).
    g, a1, b1 = ext_euclid(y, x % y)
    # Rewrite the coefficients in terms of x and y:
    # g = b1*x + (a1 - (x // y)*b1)*y.
    return (g, b1, a1 - (x // y) * b1)
def _dual_var_for_layer(dual_var_getter, layer, name, shape):
"""Creates a dual variable for the given layer shape.
Args:
dual_var_getter: Function(name, shape) returning a dual variable.
layer: Layer for which dual variables should be generated.
name: Name to use for dual variable.
shape: Shape of the dual variable, or a possibly nested dict of shapes.
Returns:
Dual variable tensors of the given shape, or a possibly nested dict of
such tensors according to the structure of `shape`.
"""
if isinstance(shape, dict):
return {k: _dual_var_for_layer(dual_var_getter, layer, name + '_' + k, v)
for k, v in shape.items()}
else:
return dual_var_getter(layer, name, shape) | c888ad63a252a79395017ecda5ebf86350616f5d | 686,990 |
def shell_config(shell):
    """
    Returns a dict in the following form, depending on the given shell:
        return {
            'prefix': '<preffix-to-use>',
            'suffix': 'suffix-to-use',
            'delimiter': '<delimiter-to-use>',
        }
    Logic from Docker Machine:
    https://github.com/docker/machine/blob/master/commands/env.go#L125
    """
    # Table lookup replaces the if/elif chain; unknown shells fall back to
    # POSIX-style `export`.
    known = {
        "fish": {"prefix": "set -gx ", "suffix": '";\n', "delimiter": ' "'},
        "powershell": {"prefix": "$Env:", "suffix": '"\n', "delimiter": ' = "'},
        "cmd": {"prefix": "SET ", "suffix": "\n", "delimiter": "="},
        "tcsh": {"prefix": "setenv ", "suffix": '";\n', "delimiter": ' "'},
        "emacs": {"prefix": '(setenv "', "suffix": '")\n', "delimiter": '" "'},
    }
    default = {"prefix": "export ", "suffix": '"\n', "delimiter": '="'}
    return known.get(shell, default)
import re
def format_symbol(symbol):
    """Normalize a gene symbol per HGNC case rules and strip tool suffixes.

    HGNC symbols are upper case except the C#orf# family (e.g. C2orf157),
    which keeps its mixed case. Additionally, seurat/scanpy rename
    duplicated genes by appending ".1" or "-1"; that suffix is stripped so
    the symbol can be matched against the HGNC database again.

    Returns a (fixed_case, suffix_stripped) tuple.
    """
    corf = re.match(r"^(C)(\d+)(orf)(\d+)$", symbol, re.IGNORECASE)
    if corf:
        # Re-assemble with canonical C#orf# casing.
        fixed_case = "C{}orf{}".format(corf.group(2), corf.group(4))
    else:
        fixed_case = symbol.upper()
    # Remove a trailing ".N" or "-N" duplicate marker, if any.
    suffix_stripped = re.sub(r"[\.\-]\d+$", "", fixed_case)
    return fixed_case, suffix_stripped
def build_tr_create_module_page_link(region, module_type_id):
    """
    Build the direct link to the corresponding Threat Response page in the
    given region for creating a module of the given type.

    Returns 'N/A' when no module type id is available.
    """
    if module_type_id is None:
        return 'N/A'
    base = f'https://securex.{region}.security.cisco.com/settings/modules/'
    return base + f'available/{module_type_id}/new'
def _message_with_time(source, message, time):
"""Create one line message for logging purposes.
Parameters
----------
source : str
String indicating the source or the reference of the message.
message : str
Short message.
time : int
Time in seconds.
"""
start_message = "[%s] " % source
# adapted from joblib.logger.short_format_time without the Windows -.1s
# adjustment
if time > 60:
time_str = "%4.1fmin" % (time / 60)
else:
time_str = " %5.1fs" % time
end_message = " %s, total=%s" % (message, time_str)
dots_len = 70 - len(start_message) - len(end_message)
return "%s%s%s" % (start_message, dots_len * ".", end_message) | b9afb954995eb5aa1e89fb6179e1d38c55cdd0b5 | 687,003 |
import bisect
def leftmostBinSearch(vec, val):
    """
    Return the leftmost position in the vector vec of val. If val is
    absent then we return the lefternmost position for the value:
    max(vec[vec < val]). The time complexity here is potentially worse
    than log(n) because of the extra step of walking backwards.
    """
    assert(len(vec) > 0)
    i = bisect.bisect_left(vec, val)
    if (i == 0):
        # val sorts before everything; leftmost slot is the answer.
        return(i)
    elif (i == len(vec)):
        # val sorts after everything; fall back to the last element.
        v = vec[i-1]
        i -= 1
    else:
        v = vec[i]
    # If the element at i overshoots val (val absent), step back one slot
    # to the largest element smaller than val ...
    if (v > val):
        i -= 1
    # ... then walk left over any run of duplicates to its leftmost copy.
    while (i > 0 and vec[i-1] == vec[i]):
        i -= 1
    return(i) | cfa15da61aff3b293cbd170c3522fe8b91afac3a | 687,015 |
def computeHSL(hexValue):
    """
    Given a six-digit hex code (no #), compute the hue, saturation, and
    luminosity. Returns a list [H, S, L]: hue is a float between 0 and
    360, saturation and luminosity floats between 0 and 1.
    """
    red = int('0x' + hexValue[0:2], 16)
    green = int('0x' + hexValue[2:4], 16)
    blue = int('0x' + hexValue[4:6], 16)
    redF = float(red) / 255
    greenF = float(green) / 255
    blueF = float(blue) / 255
    colorList = [redF, greenF, blueF]
    maxColor = max(colorList)
    minColor = min(colorList)
    # BUG FIX: luminosity is the average of max and min; the original was
    # maxColor + (minColor / 2) because of missing parentheses.
    L = (maxColor + minColor) / 2
    if maxColor == minColor:
        # Achromatic (grey): no hue or saturation.
        S = 0
        H = 0
    else:
        if L < .5:
            S = (maxColor - minColor) / (maxColor + minColor)
        else:
            S = (maxColor - minColor) / (2 - maxColor - minColor)
        if redF == maxColor:
            H = (greenF - blueF) / (maxColor - minColor)
        # BUG FIX: compare the normalized channel (greenF) to the normalized
        # maximum; the original compared the raw 0-255 `green`, so the
        # green-dominant branch was effectively unreachable.
        elif greenF == maxColor:
            H = 2 + (blueF - redF) / (maxColor - minColor)
        else:
            H = 4 + (redF - greenF) / (maxColor - minColor)
        # Scale the 0-6 sector value to degrees.
        # NOTE(review): hue can still come out negative (red-dominant with
        # blue > green); standard HSL adds 360 in that case — confirm
        # whether callers expect that normalization.
        H = (H / 6) * 360
    return [H, S, L]
def last_player(played_cards, players):
    """
    Return person who played the last card.

    E.g.:
        last_player([(1, "S"), (2, "S")], ["Abi", "Bob"])
        returns: "Bob"

    Args:
        played_cards (list):
        players (list):

    Returns:
        return (str): The players name, or None if nothing was played.
    """
    if not played_cards:
        return None
    last_index = (len(played_cards) - 1) % len(players)
    return players[last_index]
import hashlib
def CalculateMD5Checksum(filename):
    """Calculate the MD5 checksum for filename."""
    digest = hashlib.md5()
    with open(filename, 'rb') as f:
        # iter() with a sentinel reads 64 KiB chunks until EOF (b'').
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def nullify(data: dict) -> dict:
    """
    Replace empty-string values with None, in place, and return the dict.
    """
    for key in data:
        if data[key] == "":
            data[key] = None
    return data
def compute_chunksize(src, w, h, chunksize=None, max_mem=None):
    """
    Attempts to compute a chunksize for the resampling output array
    that is as close as possible to the input array chunksize, while
    also respecting the maximum memory constraint to avoid loading
    to much data into memory at the same time.
    Parameters
    ----------
    src : dask.array.Array
        The source array to resample
    w : int
        New grid width
    h : int
        New grid height
    chunksize : tuple(int, int) (optional)
        Size of the output chunks. By default the chunk size is
        inherited from the *src* array.
    max_mem : int (optional)
        The maximum number of bytes that should be loaded into memory
        during the regridding operation.
    Returns
    -------
    chunksize : tuple(int, int)
        Size of the output chunks.
    """
    start_chunksize = src.chunksize if chunksize is None else chunksize
    # Without a memory cap there is nothing to shrink.
    if max_mem is None:
        return start_chunksize
    sh, sw = src.shape
    # How many source rows/cols each output row/col maps back to.
    height_fraction = float(sh)/h
    width_fraction = float(sw)/w
    ch, cw = start_chunksize
    dim = True
    nbytes = src.dtype.itemsize
    # Shrink the output chunk one row/column at a time, alternating between
    # the two dimensions, until the implied source-memory footprint fits.
    while ((ch * height_fraction) * (cw * width_fraction) * nbytes) > max_mem:
        if dim:
            cw -= 1
        else:
            ch -= 1
        dim = not dim
        if ch == 0 or cw == 0:
            # Even a 1x1 output chunk would exceed max_mem.
            min_mem = height_fraction * width_fraction * nbytes
            raise ValueError(
                "Given the memory constraints the resampling operation "
                "could not find a chunksize that avoids loading too much "
                "data into memory. Either relax the memory constraint to "
                "a minimum of %d bytes or resample to a larger grid size. "
                "Note: A future implementation could handle this condition "
                "by declaring temporary arrays." % min_mem)
    return ch, cw | bb1921eea934ebbe3450de69913687af5f4fecb9 | 687,034 |
def listify(s):
    """
    Return *s* as a list: None becomes [], sets/tuples are converted to a
    list of their items, lists pass through unchanged (same object), and
    any other value is wrapped in a single-item list.
    """
    if s is None:
        return []
    if isinstance(s, list):
        return s
    if isinstance(s, (set, tuple)):
        return list(s)
    return [s]
import re
def normalize_keys(dict_, lowercase=True, separator='_'):
    """
    Recursively normalize every key of *dict_*: each run of
    non-alphanumeric characters is replaced by *separator* and, when
    *lowercase* is true, the key is lowercased. Values that are dicts are
    normalized the same way.

    Example:
        {"Content-Type": "text/html",
         "Last-Modified": {"Day-Of-Week": "Sat", "Day": 4, "Month": "Apr"}}
        ->
        {'content_type': 'text/html',
         'last_modified': {'day_of_week': 'Sat', 'day': 4, 'month': 'Apr'}}
    """
    result = {}
    for key, value in dict_.items():
        normalized_key = re.sub('[^A-Za-z0-9]+', separator, key)
        if lowercase:
            normalized_key = normalized_key.lower()
        if isinstance(value, dict):
            value = normalize_keys(value, lowercase, separator)
        result[normalized_key] = value
    return result
def dedent_initial(s: str, n: int = 4) -> str:
    """Strip up to *n* leading spaces from *s*, but only when the first *n*
    characters are all spaces; otherwise return *s* unchanged."""
    prefix = ' ' * n
    if s.startswith(prefix):
        return s[n:]
    return s
def manage_none_value(diff_result, column_list):
    """
    To handle None values, it fills in ``None`` for any column name missing
    from a result row.

    Args:
        diff_result(list): Result of the data validation (list of dicts).
        column_list(list): List of column names of the table.

    Returns:
        List of dictionaries, each containing every column in column_list.
    """
    return [
        {column: row.get(column, None) for column in column_list}
        for row in diff_result
    ]
def sum_to_leftmost(value, dim):
    """Sum out `value.ndim-dim` many rightmost dimensions of a given tensor.
    Args:
        value (Tensor): A tensor of `.ndim` at least `dim`.
        dim (int): The number of leftmost dims to remain.
    Returns:
        The result tensor whose ndim is `min(dim, value.ndim)`.
    """
    # Nothing to reduce when the tensor already has <= dim dimensions.
    if value.ndim <= dim:
        return value
    # Reduce every axis from `dim` through the last one in a single sum call.
    return value.sum(list(range(dim, value.ndim))) | b4bd80c149e518b703648107f09cd3a481bb9500 | 687,047 |
def load(filename):
    """Read a file.

    Args:
        filename: str, path to the file.

    Returns:
        The entire file content (decoded as UTF-8) as one string.
    """
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    return content | ef6e83192bd1f06ff530d8369e2f49a07fe677f1 | 687,049 |
def _human_size(size_bytes):
"""
format a size in bytes into a 'human' file size, e.g. B, KB, MB, GB, TB, PB
Note that bytes will be reported in whole numbers but KB and above will have
greater precision. e.g. 43 B, 443 KB, 4.3 MB, 4.43 GB, etc
"""
UNIT_SIZE = 1000.0
suffixes_table = [('B', 0), ('KB', 1), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
num = float(size_bytes)
the_precision = None
the_suffix = None
for suffix, precision in suffixes_table:
the_precision = precision
the_suffix = suffix
if num < UNIT_SIZE:
break
num /= UNIT_SIZE
if the_precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=the_precision))
return "%s%s" % (formatted_size, the_suffix) | 127ce0f3ce89d9cb294ecb4874c937840ac8c166 | 687,054 |
def _include_branding_code_in_app(dist):
    """Returns whether to include the branding code in the Chrome .app bundle.
    If a distribution is packaged in a PKG (but is not also packaged in a DMG),
    then the brand code is carried in the PKG script, and should not be added to
    the .app bundle's Info.plist.
    Args:
        dist: The |model.Distribution|.
    Returns:
        Whether to include the branding code in the app bundle.
    """
    # Include the code unless the distribution ships only as a PKG.
    return dist.package_as_dmg or not dist.package_as_pkg | 477cacaf01b46d024d76456756516f4f4ef04c64 | 687,059 |
from typing import Any
def check_str(data: Any) -> str:
    """Validate that *data* is a ``str`` and return it unchanged.

    Raises:
        TypeError: if *data* is not a string (the offending value is the
            exception argument).
    """
    if isinstance(data, str):
        return data
    raise TypeError(data)
def _parse_memory(s: str) -> int:
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
Examples
--------
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {"g": 1024, "m": 1, "t": 1 << 20, "k": 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()]) | a5b5dae1b82ac63da84d2a8de77f811a266e2efa | 687,062 |
import json
def Read_Message_Dict(BB, msg_name):
    """
    Read_Message_Dict(BB, msg_name):
    Reads the named BB message contents, json.loads it, and returns the resulting dict.
    Returns an empty dict if the message is not found or could not be read back.
    """
    try:
        msg_item = BB.ReadMessage(msg_name)
        msg_item = json.loads(msg_item)
        try:
            # Probe for dict-ness: anything without .keys() (e.g. a JSON
            # list or scalar) is replaced by an empty dict.
            test = msg_item.keys()
        except:
            # NOTE(review): bare except swallows everything, including
            # KeyboardInterrupt; `except AttributeError` (or an
            # isinstance(msg_item, dict) check) would be safer.
            msg_item = {}
    except:
        # Read or JSON-decode failure -> best-effort empty result.
        # NOTE(review): same bare-except concern as above.
        msg_item = {}
    return msg_item | daa390ddddf07d0031587f07993b63290e825db7 | 687,063 |
def parse_scoped_selector(scoped_selector):
    """Parse scoped selector into a (scope, selector) pair."""
    # A macro reference (%scope/name) expands to scope/name/macro.value.
    if scoped_selector[0] == '%':
        if scoped_selector.endswith('.value'):
            err_str = '{} is invalid cannot use % and end with .value'
            raise ValueError(err_str.format(scoped_selector))
        scoped_selector = scoped_selector[1:] + '/macro.value'
    # Everything before the last '/' is the scope ('' when there is none).
    scope, _, selector = scoped_selector.rpartition('/')
    return scope, selector
def prettyprint_tokenized(tokenized: str) -> str:
    """Returns a pretty-printable version of a document that contains tokens,
    mapping the control chars \\x1b/\\x1c/\\x1d to <, | and >."""
    table = str.maketrans({'\x1b': '<', '\x1c': '|', '\x1d': '>'})
    return tokenized.translate(table)
def copy_params_dict(model, copy_grad=False):
    """
    Create a list of (name, parameter), where parameter is copied from model.
    The list has as many parameters as model, with the same size.
    :param model: a pytorch model
    :param copy_grad: if True returns gradients instead of parameter values
    """
    # .clone() detaches the snapshot from the live buffers so later optimizer
    # steps do not mutate the copies.
    if copy_grad:
        # NOTE(review): assumes every parameter already has a populated
        # .grad; parameters that never received gradients would raise here.
        return [(k, p.grad.data.clone()) for k, p in model.named_parameters()]
    else:
        return [(k, p.data.clone()) for k, p in model.named_parameters()] | e0082f61230f7aeb2fbf8cc2c1855150985bc362 | 687,070 |
import math
def select_ghostdag_k(x, delta):
    """
    Selects the k parameter of the GHOSTDAG protocol such that anticones lager than k will be created
    with probability less than 'delta' (follows eq. 1 from section 4.2 of the PHANTOM paper)
    :param x: Expected to be 2Dλ where D is the maximal network delay and λ is the block mining rate
    :param delta: An upper bound for the probability of anticones larger than k
    :return: The minimal k such that the above conditions hold
    """
    # sigma accumulates the Poisson(x) CDF term by term:
    # fraction = x^k / k! and exp = e^(-x), so exp * fraction = P[N = k].
    k_hat, sigma, fraction, exp = 0, 0, 1, math.e ** (-x)
    while True:
        sigma += exp * fraction
        # Stop once the tail probability P[N > k_hat] drops below delta.
        if 1 - sigma < delta:
            return k_hat
        k_hat += 1
        # Incrementally extend x^k / k! by one factor.
        fraction = fraction * (x / k_hat) | 9731b509e35db024e17d63fbc6ef46235207c3ee | 687,072 |
def list_search(lst, key, value):
    """Search a list of dictionaries for the dict where dict[key] == value.

    :param lst: list of dicts to scan.
    :param key: key whose value is compared.
    :param value: value to look for.
    :return: the first matching dict.
    :raises KeyError: if no dict matches (or a dict lacks *key*).
    """
    try:
        return next(dct for dct in lst if dct[key] == value)
    except StopIteration:
        # Keep the KeyError contract for callers, but say what was being
        # searched instead of raising a bare, message-less exception; the
        # StopIteration context is suppressed as irrelevant noise.
        raise KeyError(f"no dict with {key!r} == {value!r}") from None
def sum_of_squares(n):
    """ returns the sum of squares of first n numbers """
    # Plain counter loop; local names avoid shadowing the builtins
    # `iter` and `sum` that the original used.
    total = 0
    i = 1
    while i <= n:
        total += i ** 2
        i += 1
    return total
def xyxy_to_normalized_xywh(box, size, center=True):
    """
    Converts bounding box format from 'xyxy'
    to normalized 'xywh'.

    Args:
        box: [Upper Left x, Upper Left y, Lower Right x, Lower Right y]; unnormalized.
        size: [image width, image height]
        center (bool): If True, then the x, y refer to center coordinate. Otherwise,
            it will be the top-left.

    Returns:
        x, y, w, h — each normalized to 0-1 by the image dimensions.

    References:
        - https://github.com/ultralytics/yolov3/issues/26
        - https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#2-create-labels
    """
    img_width, img_height = size
    x1, y1, x2, y2 = box
    box_width = x2 - x1
    box_height = y2 - y1
    x = x1
    y = y1
    w = box_width
    h = box_height
    if center:
        x = ((x1 + x2) / 2)
        y = ((y1 + y2) / 2)
    # Normalize horizontal quantities by width, vertical ones by height.
    x /= img_width
    y /= img_height
    w /= img_width
    # BUG FIX: height must be normalized by the image height; the original
    # divided by img_width, producing wrong h for non-square images.
    h /= img_height
    return x, y, w, h
import json
import codecs
def json_load(file_path):
    """
    Loads an UTF-8 encoded JSON
    :param file_path: Path to the JSON file
    :type file_path: string
    :rtype: dict
    :return: The JSON dictionary
    """
    # Built-in open() with an explicit encoding replaces the legacy
    # codecs.open(); the context manager closes the handle the original
    # leaked until garbage collection.
    with open(file_path, "r", encoding="utf-8") as f:
        return json.load(f)
def load_timestamps(filename):
    """ load timestamps of a recording.
    Each line of the file contains two numbers:
    the frame index and the corresponding time in milliseconds.

    Parameters
    ----------
    filename: str
        The file to extract timestamps from.

    Returns
    -------
    dict:
        Dictionary with frame index as key and timestamp as value.
    """
    timestamps = {}
    # Context manager guarantees the handle is closed even if a malformed
    # line raises; the original only closed the file on the success path.
    with open(filename, 'r') as f:
        for line in f:
            parts = line.rstrip('\n').split(' ')
            timestamps[int(parts[0])] = int(parts[1])
    return timestamps
import math
def distance(a, b):
    """
    Helper function for checking distance
    between any two points on a cartesian grid

    :param a: First point
    :type a: tuple
    :param b: Second point
    :type b: tuple
    :return: Distance between two points
    :rtype: float
    """
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def get_items_of_type(type_, mapping):
    """Gets items of mapping being instances of a given type."""
    result = {}
    for key, val in mapping.items():
        if isinstance(val, type_):
            result[key] = val
    return result
def mean(iterator, length):
    """ Returns the arithmetic mean of the values in the given iterator,
    using the caller-supplied length (a zero length divides by 1). """
    denominator = float(length or 1)
    return sum(iterator) / denominator
def inplace_return_series(dataframe, column, series,
                          inplace, return_series, target_column=None):
    """
    helper function to reuse throughout library. It applies logic for
    performing inplace series transformations and returning copies of
    modified series
    :param dataframe: pandas.DataFrame for which we are modifying series
    :param column: str name of target column for our series
    :param series: pandas.Series
    :param inplace: bool whether we wish to overwrite existing column
    with series
    :param return_series: bool whether we wish to return a copy of the
    pandas.Series object
    :param target_column: optional str; when given, the series is written to
    this column instead of `column`
    :return: pandas.Series when return_series is truthy, otherwise None
    """
    if inplace:
        # Write under target_column when provided, else under column.
        dataframe[target_column if target_column else column] = series
    if return_series:
        return series | 16213950f4f25b0993c196cd13751fb794eec484 | 687,089 |
def no_tests(tests):
    """Predicate for number of tests: True when *tests* is missing or empty."""
    has_tests = bool(tests) and len(tests) > 0
    return not has_tests
def _get_widget_selections(widgets, widget_selections):
"""Return lists of widgets that are selected and unselected.
Args:
widgets (list):
A list of widgets that we have registered already.
widget_selections (dict):
A dictionary mapping widgets
(:py:class:`reviewboard.admin.widgets.Widget`) to whether or not
they are selected (as a :py:class:`unicode`).
Returns:
tuple of list:
A 2-tuple containing a list of selected widgets (to display
on the dashboard) and a list of the unselected widgets.
"""
selected_widgets = []
unselected_widgets = []
if widget_selections:
for widget in widgets:
if widget_selections.get(widget.widget_id) == "1":
selected_widgets.append(widget)
else:
unselected_widgets.append(widget)
else:
selected_widgets = widgets
unselected_widgets = None
return selected_widgets, unselected_widgets | 25c3f2b8e68aaedba5232c4c3cbe2f3ec73c99df | 687,091 |
def search4vowels(phrase: str) -> set:
    """Return the set of vowels ('aeiou') that occur in *phrase*,
    case-insensitively."""
    vowels = set('aeiou')
    return vowels & set(phrase.lower())
from io import StringIO
def get_number(token):
    """ Return the leading run of digits, '.' and '-' characters of a
    string (the part that could form a number). """
    buf = StringIO()
    for ch in token:
        # Stop at the first character that cannot belong to a number.
        if not (ch.isdigit() or ch in '.-'):
            break
        buf.write(ch)
    result = buf.getvalue()
    buf.close()
    return result
def remove(pred):
    """Return a generator function that, applied to a collection, yields only
    the items for which *pred* is falsy (i.e. removes items matching pred)."""
    def generator(coll):
        return (item for item in coll if not pred(item))
    return generator
def new_task_id(sources, prefix=""):
    """Generate a new unique task ID
    The task ID will be unique for the given sources, and with the given prefix.
    """
    # Gather the numeric suffixes of every existing id matching the prefix,
    # across all sources; each source exposes a `task_ids` iterable of str.
    existing_ids = set()
    for source in sources:
        existing_ids |= {int(key[len(prefix):]) for key in source.task_ids
                         if key.startswith(prefix) and key[len(prefix):].isnumeric()}
    if len(existing_ids) > 0:
        highest = max(existing_ids)
    else:
        highest = 0
    # The new id is one past the highest seen (so numbering starts at 1).
    return prefix + str(highest+1) | 4e91551f74c5354458ae73a70f919e50b44a02a8 | 687,101 |
def distance_difference_calc(r2, s1, gap):
    """Compute the distance offset between mocks from survey geometry.

    Treats ``r2 + gap`` as the hypotenuse and half of ``s1`` as one leg of
    a right triangle, returning the remaining leg length minus ``r2``.

    Parameters
    ----------
    r2 : `float`
    s1 : `float`
    gap : `float`

    Returns
    ----------
    dist_diff : `float`
    """
    # Coerce inputs so string/int arguments behave like floats.
    r2, s1, gap = float(r2), float(s1), float(gap)
    hypotenuse_sq = (r2 + gap) ** 2
    half_s1_sq = (0.5 * s1) ** 2
    return (hypotenuse_sq - half_s1_sq) ** 0.5 - r2
import csv
def read_gold_qdmrs(file_path):
    """Read a CSV file of QDMR strings into a dictionary.

    Parameters
    ----------
    file_path : str
        Path to a CSV file containing (at least) the columns
        ``question_id``, ``question_text`` and ``decomposition``.

    Returns
    -------
    dict
        Maps each question_id to a dict with keys ``question_text`` and
        ``decomposition``.  (The original docstring said "list"; the
        function has always returned a dict.)
    """
    data_dict = {}
    with open(file_path, encoding="utf8") as csv_file:
        csv_reader = csv.DictReader(csv_file)
        # fieldnames is None for an empty file; guard before printing.
        if csv_reader.fieldnames:
            print(f'Column names are {", ".join(csv_reader.fieldnames)}')
        # enumerate fixes the original double-increment of line_count on the
        # first data row, which skewed the progress messages.
        for line_count, row in enumerate(csv_reader, start=1):
            data_dict[row["question_id"]] = {
                'question_text': row["question_text"],
                'decomposition': row["decomposition"],
            }
            if line_count % 500 == 0:
                print(f'Processed {line_count} lines.')
    return data_dict
def get_attr_groups(attr_name_file):
    """Group attribute names by their shared 10-character prefix.

    Reads *attr_name_file* line by line; each line looks like
    ``"<index> <attribute_name>"``.  Consecutive attributes whose names
    share the same first 10 characters form one group.  Returns the list of
    line indices at which each new group starts (always beginning with 0).
    """
    with open(attr_name_file, 'r') as handle:
        lines = handle.readlines()
    group_starts = [0]
    current_prefix = lines[0].split()[1][:10]
    for idx, line in enumerate(lines[1:], start=1):
        prefix = line.split()[1][:10]
        if prefix != current_prefix:
            group_starts.append(idx)
            current_prefix = prefix
    return group_starts
def _service_and_endpoint_labels_from_method(method_name):
"""Get normalized service_label, endpoint_label tuple from method name"""
name_parts = method_name.split("/")
if len(name_parts) != 3 or name_parts[0] != "" or name_parts[1] == "" or name_parts[2] == "":
raise AssertionError("Invalid method name: {}".format(method_name))
service_label = name_parts[1].replace(".", "_")
endpoint_label = name_parts[2].replace(".", "_")
return service_label, endpoint_label | e10f2aae174691aad0713c6ea4bbd0fdd830b828 | 687,110 |
def old_hindu_lunar_leap(date):
    """Return the leap flag of an Old Hindu lunar date.

    *date* is a sequence laid out as ``[year, month, leap, day]``; the leap
    flag sits at index 2.
    """
    leap = date[2]
    return leap
def get_duplicates(setlist):
    """
    Takes a list of sets, and returns a set of items that are found in
    more than one set in the list.

    Runs in a single pass (the original rebuilt the union of all remaining
    sets for every element, which was quadratic): an item that was already
    seen in an earlier set and shows up again is a duplicate.
    """
    seen = set()
    duplicates = set()
    for myset in setlist:
        duplicates |= seen & myset
        seen |= myset
    return duplicates
import binascii
import itertools
def xor(data, key):
    """Return `data` xor-ed with `key`.

    :param data: bytes-like payload to mask
    :param key: hex string, optionally prefixed with "0x"; repeated
        cyclically to cover the length of *data*
    :return: bytearray of ``data[i] ^ key[i % len(key)]``
    """
    # Strip an optional "0x"/"0X" prefix.  The original str.lstrip("0x")
    # also ate leading zero digits of the key itself, corrupting keys such
    # as "00ff".
    if key.startswith(("0x", "0X")):
        key = key[2:]
    key_bytes = binascii.unhexlify(key)
    return bytearray(x ^ y for x, y in zip(data, itertools.cycle(key_bytes)))
def step_count(group_idx):
    """Return the number of runs of equal consecutive values in group_idx."""
    if len(group_idx) == 0:
        return 0
    runs = 1
    current = group_idx[0]
    for value in group_idx[1:]:
        if value != current:
            runs += 1
            current = value
    return runs
def ds_read_mock(data_set, *args, **kwargs):
    """
    Mock of IkatsApi.ts.fid method
    Same parameters and types as the original function; all arguments are
    ignored and a fixed payload is returned.
    """
    ts_identifiers = ['00001', '00002', '00003', '00004']
    return {"description": "description of my data set",
            "ts_list": ts_identifiers}
def ns(s):
    """Strip a leading '{namespace}' prefix; return s unchanged if there is
    no '}' in it."""
    head, brace, remainder = s.partition('}')
    return remainder if brace else s
import inspect
def get_mro(cls):
    """
    Wrapper on top of :func:`inspect.getmro` that recognizes ``None`` as a
    type (treated like ``type(None)``).
    """
    if cls is None or cls is type(None):
        return (type(None), object)
    assert isinstance(cls, type)
    return inspect.getmro(cls)
def create_new_code(questionnaire, configuration):
    """
    Create a new code for a Questionnaire based on the configuration.

    Args:
        questionnaire (Questionnaire): The Questionnaire object.
        configuration (str): The code of the configuration.

    Returns:
        str: ``"<configuration>_<questionnaire id>"``.
    """
    return f'{configuration}_{questionnaire.id}'
def get_dbot_level(threat_level_id: str) -> int:
    """
    Map a MISP threat level id to a DBOT score.

    MISP to DBOT:
    4 = 0 (UNDEFINED to UNKNOWN)
    3 = 2 (LOW to SUSPICIOUS)
    1 | 2 = 3 (MED/HIGH to MALICIOUS)

    Args:
        threat_level_id (str): MISP threat level id.

    Returns:
        int: DBOT score (unrecognized ids map to 0).
    """
    scores = {'1': 3, '2': 3, '3': 2}
    return scores.get(threat_level_id, 0)
def project_using_projection_matrix(data_to_transform, projection_matrix):
    """
    Projects given data into a lower dimensional subspace using the
    provided projection_matrix.

    Uses the ``*`` operator — i.e. a matrix product for ``numpy.matrix``
    inputs; presumably callers pass matrix types (confirm against callers).
    """
    return data_to_transform * projection_matrix
import math
def constrain_angle(angle: float) -> float:
    """Wrap *angle* (radians) onto the interval [-pi, pi]."""
    sine, cosine = math.sin(angle), math.cos(angle)
    return math.atan2(sine, cosine)
def get_meta_file_metainfo(img_file, labels):
    """Return (synset, numeric label, human-readable labels str) for an image.

    The synset is the parent directory name of *img_file* and must be a key
    in the *labels* dict; otherwise a ValueError is raised.
    """
    synset = img_file.split('/')[-2]
    if synset not in labels:
        raise ValueError("Invalid synset '%s: not found in labels dict." % synset)
    entry = labels[synset]
    return (synset, entry['label'], str(entry['human_labels']))
import itertools
def generate_final_heads(*iterables):
    """
    Collect the unique headers across all given header sources.

    :param iterables: any number of iterables of header names
    :return: a set with every distinct header
    """
    return set(itertools.chain.from_iterable(iterables))
def bytearray_to_hex(data):
    """Render *data* as space-separated hex codes (e.g. "0x1 0xff").

    Accepts byte sequences (``bytes``/``bytearray``, whose iteration yields
    ints on Python 3) as well as str.  The original called ``ord`` on every
    element, which raises TypeError for actual bytearrays on Python 3.
    """
    return ' '.join(hex(b if isinstance(b, int) else ord(b)) for b in data)
def get_attr_connections(source_attr):
    """
    Collect the input and output connections of an attribute.

    :param source_attr: Attribute object exposing ``inputs``/``outputs``
        (queried with ``p=True``, i.e. as plugs).
    :return: dict with keys 'inputs' and 'outputs'.
    """
    connections = {
        'inputs': source_attr.inputs(p=True),
        'outputs': source_attr.outputs(p=True),
    }
    return connections
def _to_lower(items):
"""
Converts a list of strings into a list of lower case strings.
Parameters
----------
items : list
A list of strings.
Returns
-------
The list of items all converted to lower case.
"""
return [item.lower() for item in items] | fbe7bccd1fd77089a5c2776a7d5911187ed840bb | 687,153 |
import codecs
def _convert_text_eb2asc(value_to_convert):
"""
Converts a string from ebcdic to ascii
:param value_to_convert: The ebcdic value to convert
:return: converted ascii text
"""
val = codecs.encode(codecs.decode(value_to_convert, "cp500"), "latin-1")
return val | 1f74909f8f615fdbf9431e4eb759bad778290b88 | 687,154 |
def get_all(isamAppliance, check_mode=False, force=False):
    """
    Retrieve all management authorization roles.

    :param isamAppliance: appliance wrapper exposing ``invoke_get``
    :param check_mode: unused; kept for interface compatibility
    :param force: unused; kept for interface compatibility
    :return: whatever ``invoke_get`` returns
    """
    description = "Get management authorization - roles"
    uri = "/authorization/roles/v1"
    return isamAppliance.invoke_get(description, uri)
def generate_global(keep, scores, check_keep, check_scores):
    """
    Membership-inference prediction via a simple global threshold sweep.

    For every (ans, sc) pair from ``check_keep``/``check_scores`` the
    prediction values are the negated per-row means of ``sc``, and
    ``answers`` collects the ground-truth flags.  ``keep`` and ``scores``
    are unused — presumably kept for interface parity with sibling attack
    functions (confirm against callers).
    """
    prediction = [p for _, sc in zip(check_keep, check_scores)
                  for p in -sc.mean(1)]
    answers = [a for ans, _ in zip(check_keep, check_scores)
               for a in ans]
    return prediction, answers
import pathlib
import sqlite3
def open_db(
    path: pathlib.Path = pathlib.Path(__file__).parent.parent.joinpath(
        "data"
    ).resolve()
) -> sqlite3.Connection:
    """Open a connection to the bot_o_mat.db file inside *path*.

    The connection's ``row_factory`` is set so queries return dicts
    (column name -> value) instead of tuples, and foreign-key enforcement
    is switched on (SQLite requires enabling it per connection).

    :return: the configured connection
    :rtype: sqlite3.Connection
    """
    db_file = path.joinpath("bot_o_mat.db").resolve()
    connection = sqlite3.connect(database=db_file)

    def _row_to_dict(cursor, row):
        # https://stackoverflow.com/a/49725294/12132366
        return {column[0]: row[idx]
                for idx, column in enumerate(cursor.description)}

    connection.row_factory = _row_to_dict
    connection.cursor().execute("PRAGMA foreign_keys = 1")
    connection.commit()
    return connection
def isIterable(obj, returnTrueForNone=False):
    """Tell whether *obj* can be iterated over.

    ``None`` is normally reported as non-iterable; pass
    ``returnTrueForNone=True`` to treat it as iterable instead.

    :example:
    >>> isIterable([1])
    True
    >>> isIterable(None)
    False
    >>> isIterable(None, returnTrueForNone=True)
    True
    >>> isIterable(1)
    False
    >>> isIterable("a")
    True
    >>> isIterable({})
    True
    """
    if obj is None:
        return bool(returnTrueForNone)
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def vp_from_ke(m):
    """
    Computes the vanishing point from the product of the intrinsic and
    extrinsic matrices C = KE.

    The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T,
    i.e. the first column of C de-homogenised by its last entry.
    """
    scale = m[2, 0]
    return (m[0, 0] / scale, m[1, 0] / scale)
from typing import List
from typing import Dict
import yaml
def load_website_sources_list(
    config_filename: str = "website_sources.yaml",
) -> List[Dict]:
    """
    Load the list of websites to scrape (with their attributes) from a
    multi-document YAML config file.
    """
    with open(config_filename, "r") as fp:
        return list(yaml.safe_load_all(fp))
def case_fold(text):
    """Converts text to lower case.

    NOTE(review): despite the name, this uses ``str.lower`` rather than
    ``str.casefold``, so aggressive Unicode folding (e.g. "ß" -> "ss") does
    not happen — confirm callers only need simple lowering before renaming
    or changing this.
    """
    return text.lower()
import torch
def smooth_l1_loss(pred, target, beta=1.0):
    """
    Smooth L1 Loss introduced in [1].

    Quadratic for |pred - target| < beta, linear beyond it.

    Args:
        pred (:obj:`torch.Tensor`): The predictions.
        target (:obj:`torch.Tensor`): The learning targets.
        beta (float, optional): The threshold in the piecewise function.
            Default: ``1.0``.

    Returns:
        :obj:`torch.Tensor`: The elementwise loss tensor.

    References:
        1. Girshick et al. (https://arxiv.org/abs/1504.08083)
    """
    # Empty targets: return a zero that stays connected to pred's graph.
    if target.numel() == 0:
        return pred.sum() * 0
    assert beta > 0
    assert pred.size() == target.size()
    diff = torch.abs(pred - target)
    quadratic = 0.5 * diff * diff / beta
    linear = diff - 0.5 * beta
    return torch.where(diff < beta, quadratic, linear)
def get_ta(tr, n_slices):
    """Return the slice-timing acquisition time TA = TR - TR / n_slices."""
    slice_duration = tr / float(n_slices)
    return tr - slice_duration
def is_left(p0, p1, p2):
    """
    Tests which side of the infinite line through p0 and p1 the point p2
    lies on (2D cross product of p1-p0 and p2-p0).

    returns: >0 if p2 is left of line thru p0 and p1
             =0 if p2 is on the line
             <0 if p2 is right of the line
    """
    dx, dy = p1.x - p0.x, p1.y - p0.y
    return dx * (p2.y - p0.y) - (p2.x - p0.x) * dy
def get_message_index(response, message):
    """Return the index of *message* within search *response*, -1 if absent."""
    return next((idx for idx, result in enumerate(response)
                 if result.object == message), -1)
import math
import functools
import operator
def log_beta(x, y, tol=0.):
    """
    Computes log Beta function.

    When ``tol >= 0.02`` this uses a shifted Stirling's approximation to the
    log Beta function. The approximation adapts Stirling's approximation of the
    log Gamma function::

        lgamma(z) ≈ (z - 1/2) * log(z) - z + log(2 * pi) / 2

    to approximate the log Beta function::

        log_beta(x, y) ≈ ((x-1/2) * log(x) + (y-1/2) * log(y)
                          - (x+y-1/2) * log(x+y) + log(2*pi)/2)

    The approximation additionally improves accuracy near zero by iteratively
    shifting the log Gamma approximation using the recursion::

        lgamma(x) = lgamma(x + 1) - log(x)

    If this recursion is applied ``n`` times, then absolute error is bounded by
    ``error < 0.082 / n < tol``, thus we choose ``n`` based on the user
    provided ``tol``.

    :param torch.Tensor x: A positive tensor.
    :param torch.Tensor y: A positive tensor.
    :param float tol: Bound on maximum absolute error. Defaults to ``0.``,
        in which case the computation defers exactly to ``lgamma`` (the
        original docstring incorrectly stated 0.1).
    :rtype: torch.Tensor
    """
    assert isinstance(tol, (float, int)) and tol >= 0
    if tol < 0.02:
        # At small tolerance it is cheaper to defer to torch.lgamma().
        return x.lgamma() + y.lgamma() - (x + y).lgamma()
    # This bound holds for arbitrary x,y. We could do better with large x,y.
    shift = int(math.ceil(0.082 / tol))
    # Shift x, y (and their sum) upward `shift` times, accumulating the
    # correction factors xy/(x*y) whose log undoes the shift at the end.
    xy = x + y
    factors = []
    for _ in range(shift):
        factors.append(xy / (x * y))
        x = x + 1
        y = y + 1
        xy = xy + 1
    log_factor = functools.reduce(operator.mul, factors).log()
    # Stirling's approximation applied at the shifted arguments.
    return (log_factor + (x - 0.5) * x.log() + (y - 0.5) * y.log()
            - (xy - 0.5) * xy.log() + (math.log(2 * math.pi) / 2 - shift))
def stations_by_river(stations):
    """Map each river name to a sorted list of its monitoring stations.

    Stations whose ``river`` attribute is ``None`` are skipped.

    :param stations: iterable of station objects with ``river`` and ``name``
    :return: dict mapping river name -> alphabetically sorted station names
    """
    rivers = {}
    for station in stations:
        if station.river is None:
            continue
        # setdefault replaces the original's explicit `in rivers.keys()`
        # membership test and the dead `else: pass` branch.
        rivers.setdefault(station.river, []).append(station.name)
    for names in rivers.values():
        names.sort()
    return rivers
def create_field_matching_dict(airtable_records, value_field, key_field=None, swap_pairs=False):
    """Build a dict matching field values from the same Airtable record.

    Uses airtable_download() output to create a dictionary that matches
    field values from the same record together. Useful for keeping track of
    relational data.

    If *key_field* is ``None`` the pairs are ``{<record id>: value_field}``,
    otherwise ``{key_field: value_field}``.  With ``swap_pairs=True`` the
    mapping is reversed: ``{value_field: <record id> (or key_field)}``.
    """
    airtable_dict = {}
    for airtable_record in airtable_records:
        if key_field is None:
            key = airtable_record['id']
        else:
            key = airtable_record['fields'].get(key_field)
        value = airtable_record['fields'].get(value_field)
        # Bug fix: the original branches were inverted relative to the
        # documented contract (swap_pairs=True must yield {value: key}).
        if swap_pairs:
            airtable_dict[value] = key
        else:
            airtable_dict[key] = value
    return airtable_dict
def gen_rect(t, b, l, r):
    """
    :param t: top latitude
    :param b: bottom latitude
    :param l: left longitude
    :param r: right longitude
    :return: GeoJSON rect with specified borders
    """
    # Closed ring: clockwise from top-left back to top-left.
    ring = [[l, t], [r, t], [r, b], [l, b], [l, t]]
    return {
        'type': 'Feature',
        'properties': {},
        'geometry': {'type': 'Polygon', 'coordinates': [ring]},
    }
import torch
def calculate_P_pi(P, pi):
    """
    Contract the transition tensor with a policy:
    P_pi(s, t) = \\sum_a pi(s, a) * P(s, a, t)

    :param P: transition matrix of size |S| x |A| x |S|
    :param pi: policy matrix of size |S| x |A|
    :return: a matrix of size |S| x |S|
    """
    P_pi = torch.einsum('sat,sa->st', P, pi)
    return P_pi
import copy
def exact_to_1st_order_model(model):
    """Convert a model training on the exact augmented objective into one
    training on the 1st order approximation.

    Returns a deep copy with ``approx``/``feature_avg`` enabled and
    ``regularization`` disabled; *model* itself is left untouched.
    """
    approx_model = copy.deepcopy(model)
    approx_model.approx = True
    approx_model.feature_avg = True
    approx_model.regularization = False
    return approx_model
def kmp(S):
    """Knuth-Morris-Pratt failure function for S.

    Returns a table F such that F[i] is the length of the longest proper
    suffix of S[0...i] that is also a prefix of S[0...i].  Empty input
    yields an empty table.
    """
    F = [0] * len(S)
    for i in range(1, len(S)):
        k = F[i - 1]
        # Fall back through shorter borders until one extends (or none do).
        while k and S[k] != S[i]:
            k = F[k - 1]
        if S[k] == S[i]:
            k += 1
        F[i] = k
    return F
def quat_real(quaternion):
    """Return the scalar (real) part of a quaternion as a float.

    >>> quat_real([3, 0, 1, 2])
    3.0
    """
    real_part = quaternion[0]
    return float(real_part)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.