content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def flatten_dict(original_dict, delimiter="/"):
    """Flattens a dictionary of dictionaries by one level.

    Note that top level keys will be overridden if they collide with flat keys.
    E.g. using delimiter="/" and original_dict={"foo/bar": 1, "foo": {"bar": 2}},
    the top level "foo/bar" key would be overwritten.

    Args:
        original_dict: The dictionary to flatten.
        delimiter: The value used to delimit the keys in the flat_dict.

    Returns:
        The flattened dictionary.
    """
    # Python 2 is EOL; plain dict.items() replaces the six.iteritems shim.
    flat_dict = {}
    for outer_key, inner_dict in original_dict.items():
        if isinstance(inner_dict, dict):
            # Merge one nested level, joining keys with the delimiter.
            for inner_key, value in inner_dict.items():
                flat_dict["{}{}{}".format(outer_key, delimiter, inner_key)] = value
        else:
            # Non-dict values are kept at the top level unchanged.
            flat_dict[outer_key] = inner_dict
    return flat_dict
def safe_lower(txt):
    """Return *txt* lowercased; return '' when txt is None or empty."""
    return txt.lower() if txt else ""
def get_from_module(module_params, module_name, identifier):
    """Gets a class/instance of a module member specified by the identifier.

    Args:
        module_params: dict, maps lower-case identifiers to module members
        module_name: str, containing the name of the module (used in the error message)
        identifier: str, specifying the module member (matched case-insensitively)

    Returns:
        a class or an instance of a module member specified by the identifier

    Raises:
        ValueError: if identifier.lower() is not a key of module_params.
    """
    res = module_params.get(identifier.lower())
    if res is None:
        # Put the offending identifier in the message itself; the original
        # passed it as a second (easily lost) exception argument.
        raise ValueError(
            "Invalid {} identifier: {!r}".format(module_name, identifier))
    return res
def lookup_counts(
    row,
    lookup_table,
    index="step",
    columns="participant",
    default=False
):
    """
    Function to apply to a DataFrame to cross-reference
    counts in a lookup_table.

    Parameters
    ----------
    row: Series
        row of a DataFrame
    lookup_table: DataFrame
        DataFrame to cross-reference
    index: string or numeric, optional
        name of column in row that contains an index value
        for lookup_table, default = "step"
    columns: string or numeric, optional
        name of column in row that contains a column name
        for lookup_table, default = "participant"
    default: boolean or other, optional
        value to return if lookup not in lookup table
        default = False

    Returns
    -------
    value: boolean or other
        the value at index, columns; otherwise default
    """
    try:
        return lookup_table.loc[row[index], row[columns]].all()
    except (KeyError, IndexError, AttributeError):
        # Narrowed from a bare `except:`: missing row key / label raises
        # KeyError or IndexError, a scalar cell without .all() raises
        # AttributeError.  Anything else is a real bug and should surface.
        return default
import re
def normalize(obj):
    """Normalize output object.

    Args:
        obj: Google Test's JSON output object to normalize.

    Returns:
        Normalized output without any references to transient information that may
        change from run to run.
    """
    def _normalize(key, value):
        # Replace run-to-run varying fields with '*' so outputs compare equal.
        if key == 'time':
            # Durations such as "1.23s".
            return re.sub(r'^\d+(\.\d+)?s$', '*', value)
        elif key == 'timestamp':
            # ISO-8601 timestamps such as "2020-01-01T00:00:00Z".
            return re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\dZ$', '*', value)
        elif key == 'failure':
            # Strip file paths / line numbers, then collapse stack traces.
            value = re.sub(r'^.*[/\\](.*:)\d+\n', '\\1*\n', value)
            return re.sub(r'Stack trace:\n(.|\n)*', 'Stack trace:\n*', value)
        else:
            # Any other value may itself be nested: recurse.
            return normalize(value)
    if isinstance(obj, dict):
        return {k: _normalize(k, v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [normalize(x) for x in obj]
    else:
        # Scalars are returned unchanged.
        return obj
def counting_sort(arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/countingsort2/problem

    Given an unsorted list of integers, use the counting sort method to sort the list
    and then print the sorted list.

    Args:
        arr (list): List of non-negative integers to sort

    Returns:
        list: The list of integers in sorted ascending order
    """
    if not arr:
        # Empty input: nothing to count (max() would raise).
        return []
    # Size the count table from the data instead of a hard-coded 100,
    # which crashed (IndexError) for any value >= 100.
    counts = [0] * (max(arr) + 1)
    for value in arr:
        counts[value] += 1
    sorted_list = []
    for value, count in enumerate(counts):
        sorted_list.extend([value] * count)
    return sorted_list
def polynomial_add_polynomial(a, b):
    """
    Addition function of two polynomials (coefficient lists).

    :param a: First polynomial.
    :param b: Second polynomial.
    :return: The result of adding two polynomials
    """
    # Make `longer` the polynomial with at least as many coefficients.
    if len(a) >= len(b):
        longer, shorter = a, b
    else:
        longer, shorter = b, a
    # Pairwise-sum the overlapping part, then append the leftover tail.
    summed = [x + y for x, y in zip(longer, shorter)]
    return summed + longer[len(shorter):]
def format_face_coords(ibm_analyze_result):
    """
    Parse the face coords extracted from IBM service_v4.

    :param ibm_analyze_result: the json object directly returned from IBM face detection service_v4
        see an example in "watson_experiment/sample_face_and_result/sample_output.json"
    :return: a list of location, each looks like
        {
            "left": 64,
            "top": 72,
            "width": 124,
            "height": 151
        },
    """
    # NOTE(review): assumes the v4 response shape
    # images[0].objects.collections[0].objects[*].location -- confirm against
    # the sample_output.json referenced above.
    outer_objects = ibm_analyze_result['images'][0]['objects']
    if not outer_objects:  # i.e. dictionary is empty, no face detected
        return []
    else:
        objects = outer_objects['collections'][0]['objects']
        return [obj['location'] for obj in objects]
def apply_threshold(heatmap, threshold):
    """
    Simple utility function which encapsulates the heat-map thresholding algorithm.

    NOTE: modifies ``heatmap`` in place (boolean-mask assignment) and also
    returns the same object for call-chaining convenience.

    :param heatmap: array supporting boolean-mask indexing (e.g. numpy array)
    :param threshold: values less than or equal to this are zeroed
    :return: the thresholded map (the same object as the input)
    """
    # Zero out pixels below the threshold
    heatmap[heatmap <= threshold] = 0
    # Return thresholded map
    return heatmap
from docutils.core import publish_parts
def do_rst(s):
    """
    Parse the string using the reStructuredText parser from the
    docutils package.

    requires `docutils`_.

    .. _docutils: http://docutils.sourceforge.net/

    :param s: reStructuredText source string
    :return: the rendered HTML body fragment (no <html>/<body> wrapper)
    """
    # 'html4css1' is docutils' classic HTML writer; the 'fragment' part holds
    # only the document body, suitable for embedding in a larger page.
    parts = publish_parts(source=s, writer_name='html4css1')
    return parts['fragment']
def first(*args):
    """
    Returns the first argument that is not `None` (or None when all are).
    """
    return next((item for item in args if item is not None), None)
def Q_wastecooler(W_mass, Cw, t_coolwater_exit, tw):
    """
    Calculates the heat load of waste cooler.

    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    Cw : float
        The heat capacity of waste, [J/(kg * degrees C)]
    t_coolwater_exit: float
        The end temperature of cool waste, [degrees C]
    tw : float
        The temperature of boiling waste, [degrees C]

    Returns
    -------
    Q_wastecooler : float
        The heat load of waste cooler, [W] , [J/s]

    References
    ----------
    Дытнерский, формула 2.2, стр.45
    """
    # Q = m * c * dT
    temperature_drop = tw - t_coolwater_exit
    return W_mass * Cw * temperature_drop
def rgb_to_xyz(red, green, blue):
    """
    Convert standard RGB color to XYZ color.

    :arg int red: RGB value of Red.
    :arg int green: RGB value of Green.
    :arg int blue: RGB value of Blue.
    :returns: Tuple (X, Y, Z) representing XYZ color
    :rtype: tuple
    D65/2° standard illuminant
    """
    def linearize(channel):
        # Undo the sRGB gamma curve, then scale to [0, 100].
        channel /= 255.0
        if channel > 0.04045:
            channel = pow((channel + 0.055) / 1.055, 2.4)
        else:
            channel /= 12.92
        return channel * 100
    r_lin = linearize(red)
    g_lin = linearize(green)
    b_lin = linearize(blue)
    # sRGB -> XYZ matrix (D65/2°).
    x_val = r_lin * 0.4124 + g_lin * 0.3576 + b_lin * 0.1805
    y_val = r_lin * 0.2126 + g_lin * 0.7152 + b_lin * 0.0722
    z_val = r_lin * 0.0193 + g_lin * 0.1192 + b_lin * 0.9505
    return x_val, y_val, z_val
def map_to_ta_modes(ins, max_group, min_group):
    """Turns the min/max groups into the closest allowable
    TA group mode.

    Parameters
    ----------
    ins : str
        Instrument.
    max_group : int
        The maximum number of groups without oversaturating.
    min_group : int
        The groups needed to hit the target SNR.

    Returns
    -------
    min_ta_groups : int
        The min possible groups to hit target SNR.
    max_ta_groups : int
        The max possible groups before saturation.
    """
    # Allowable group modes for each ins
    groups = {'miri': [3, 5, 9, 15, 23, 33, 45, 59, 75, 93, 113, 135, 159, 185, 243, 275, 513],
              # BUG FIX: the original had a bare `1` where `11` belongs in the
              # odd-number sequence (3, 5, 7, 9, 11, 13, ...).
              'niriss': [3, 5, 7, 9, 11, 13, 15, 17, 19],
              'nirspec': [3],
              'nircam': [3, 5, 9, 17, 33, 65]
              }
    # Match the literal min and max groups to the nearest mode.
    allowable_groups = groups[ins]
    min_ta_groups = min(allowable_groups, key=lambda x: abs(x - min_group))
    max_ta_groups = min(allowable_groups, key=lambda x: abs(x - max_group))
    # Unless it was oversaturated from the get-go OR there aren't enough groups
    # for SNR
    if min_group == 0:
        min_ta_groups = 0
        max_ta_groups = 0
    if min_group > max(allowable_groups):
        min_ta_groups = -1
        max_ta_groups = 0
    # BOTH ARE FLIPPED RN -- I WILL FLIP BOTH BACK SOON...
    return max_ta_groups, min_ta_groups
def compute_gc_content(dna):
    """
    computes GC-content of dna (the percentage of its bases that are either cytosine or guanine).

    Args:
        dna (str): DNA string (upper-case bases; lower-case 'g'/'c' are not counted).

    Returns:
        float: GC-content of dna as a percentage in [0, 100].
    """
    # Raises ZeroDivisionError for an empty string.
    gc = dna.count("G") + dna.count("C")
    gc_content = gc * 100 / len(dna)
    return gc_content
def prettycase(var):  # Some Variable
    """
    Pretty case convention. Include space between each element and uppercase the first letter of element.

    :param var: Variable to transform
    :type var: :py:class:`list`
    :returns: **transformed**: (:py:class:`str`) - Transformed input in ``Pretty Case`` convention.
    """
    capitalized = [element[0].upper() + element[1:] for element in var]
    return " ".join(capitalized)
def plot_data(d, field):
    """
    Return the x and y series to be used for plotting

    Args:
        d (OrderedDict)
        field (str)

    Returns:
        Tuple[list, list]:
            [0] The x-series (keys whose record has a non-None `field`)
            [1] The y-series (the corresponding `field` values)
    """
    xs = []
    ys = []
    for key, record in d.items():
        if record[field] is not None:
            xs.append(key)
            ys.append(record[field])
    return (xs, ys)
from bs4 import BeautifulSoup
from typing import List
def find_data_rows(soup: BeautifulSoup) -> List[BeautifulSoup]:
    """Queries the provided BeautifulSoup to find <tr> elements which are inside a <tbody>.

    Exploring the data shows that such rows correspond to vaccine site data

    :return: list of matching <tr> Tag objects.
    """
    def is_data_row(tag):
        # Direct-parent check: excludes e.g. header rows living in <thead>.
        return tag.name == "tr" and tag.parent.name == "tbody"
    return soup.find_all(is_data_row)
def keys_from_position(position, width, height=None):
    """
    Generate a set of keys suitable to be used on a map generator to get a
    horizontal rectangular slice of a (t,x,y,z) space centered on position.
    (Returns a tuple (float, slice, slice, float).
    """
    # A square slice when no height is given.
    height = width if height is None else height
    t, x, y, z = position[0], position[1], position[2], position[3]
    half_w = width / 2
    half_h = height / 2
    return (t,
            slice(x - half_w, x + half_w),
            slice(y - half_h, y + half_h),
            z)
def evaluate(env,
             policy,
             num_episodes = 10,
             video_filename = None,
             max_episodes_per_video = 5,
             return_distributions=False,
             return_level_ids=False):
    """Evaluates the policy.

    Args:
        env: Environment to evaluate the policy on.
        policy: Policy to evaluate.
        num_episodes: A number of episodes to average the policy on.
        video_filename: If not None, save num_episodes_per_video to a video file.
        max_episodes_per_video: When saving a video, how many episodes to render.
        return_distributions: Whether to return per-step rewards and episode return
            distributions instead of mean
        return_level_ids: Whether to return level ids to agent in ProcGen.

    Returns:
        Averaged reward and a total number of steps.
    """
    del video_filename  # placeholder
    del max_episodes_per_video
    total_timesteps = 0.
    total_returns = 0.0
    total_log_probs = 0.0
    return_acc = []
    reward_acc = []
    for _ in range(num_episodes):
        episode_return = 0.
        episode_log_prob = 0.
        episode_timesteps = 0.
        timestep = env.reset()
        while not timestep.is_last():
            if type(policy).__name__ == 'TfAgentsPolicy':
                action, log_probs = policy.act(timestep.observation)
                episode_log_prob += log_probs.numpy().item()
            else:
                if return_level_ids:
                    action = policy.act(timestep.observation, env._infos[0]['level_seed'])  # pylint: disable=protected-access
                else:
                    action = policy.act(timestep.observation)
                if hasattr(action, 'numpy'):
                    action = action.numpy()
            timestep = env.step(action)
            total_returns += timestep.reward[0]
            episode_return += timestep.reward[0]
            total_timesteps += 1.0
            episode_timesteps += 1.0
            reward_acc.append(timestep.reward[0])
        episode_log_prob /= episode_timesteps
        total_log_probs += episode_log_prob
        return_acc.append(episode_return)
    if return_distributions:
        return (reward_acc, return_acc,
                total_timesteps / num_episodes, total_log_probs / num_episodes)
    # BUG FIX: the original compared against 'tfAgentsPolicy' (lower-case t)
    # here while checking 'TfAgentsPolicy' in the loop above, so this branch
    # (which includes the averaged log-probs) was unreachable.
    if type(policy).__name__ == 'TfAgentsPolicy':
        return (total_returns / num_episodes,
                total_timesteps / num_episodes, total_log_probs / num_episodes)
    else:
        return total_returns / num_episodes, total_timesteps / num_episodes
def str_upper_lookup(full_match: str, inner_group: str) -> str:
    """Handles the cases of: ${upper:aaAAaa}"""
    # ignore default values
    without_default = inner_group.split(":-", 1)[0]
    # Everything after the first ':' is the value to upper-case.
    return without_default.split(":", 1)[1].upper()
def in_group(user, groups):
    """Returns a boolean if the user is in the given group, or comma-separated
    list of groups.

    Usage::
        {% if user|in_group:"Friends" %}
        ...
        {% endif %}

    or::
        {% if user|in_group:"Friends,Enemies" %}
        ...
        {% endif %}
    """
    group_list = groups.split(',')
    try:
        return bool(user.groups.filter(name__in=group_list).values('name'))
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit).  A template filter must never
        # raise, so any lookup failure -- e.g. an AnonymousUser without a
        # usable `groups` manager -- is treated as "not in group".
        return False
import math
def atm_pressure(altitude):
    """
    Estimate atmospheric pressure from altitude.

    Calculated using a simplification of the ideal gas law, assuming 20 degrees
    Celsius for a standard atmosphere. Based on equation 7, page 62 in Allen
    et al (1998).

    :param altitude: Elevation/altitude above sea level [m]
    :return: atmospheric pressure [kPa]
    :rtype: float
    """
    # Temperature ratio of the standard-atmosphere lapse model (6.5 K/km).
    temperature_ratio = (293.0 - 0.0065 * altitude) / 293.0
    # 101.3 kPa is sea-level standard pressure.
    return 101.3 * math.pow(temperature_ratio, 5.26)
def scale(value):
    """Scale a value from the acceleration range (0-19.6) to the RGB range (0-255)."""
    # Sign is discarded; magnitudes beyond 19.6 saturate at full brightness.
    magnitude = min(abs(value), 19.6)
    return int(magnitude / 19.6 * 255)
def translate(bbox, x_offset=0, y_offset=0):
    """Translate bounding boxes by offsets.

    Parameters
    ----------
    bbox : numpy.ndarray
        Numpy.ndarray with shape (N, 4+) where N is the number of bounding boxes.
        The second axis represents attributes of the bounding box.
        Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`,
        we allow additional attributes other than coordinates, which stay intact
        during bounding box transformations.
    x_offset : int or float
        Offset along x axis.
    y_offset : int or float
        Offset along y axis.

    Returns
    -------
    numpy.ndarray
        Translated bounding boxes with original shape.
    """
    # Work on a copy so the caller's array is left untouched.
    shifted = bbox.copy()
    offset = (x_offset, y_offset)
    shifted[:, :2] += offset   # (x_min, y_min)
    shifted[:, 2:4] += offset  # (x_max, y_max)
    return shifted
def get_width_height(image):
    """ Returns a tuple (width, height) indicating the width and height of the image."""
    # NOTE(review): len(image) counts rows and len(image[0]) counts columns;
    # for a row-major image those are conventionally (height, width), the
    # opposite of the names used here.  Callers may rely on this ordering, so
    # it is documented rather than changed -- confirm the intended convention.
    width = len(image)
    height = len(image[0])
    return width, height
import logging
def get_logger(name):
    """
    Return a logger that will contextualize the logs with the name.
    """
    return logging.getLogger(name)
def get_max_temp(liveness, args):
    """Returns sum of maximum memory usage per tile of temporary variables.

    Args:
        liveness: report dict exposing ["notAlwaysLive"]["maxBytesByTile"],
            an iterable of per-tile byte counts.
        args: unused here; presumably kept for signature compatibility with
            sibling metric functions -- confirm before removing.
    """
    return sum(liveness["notAlwaysLive"]["maxBytesByTile"])
def fail(test, msg=None):
    """Create failure status and message object

    :param test: test with status to be altered
    :param msg: optional message for failure reason
    :return: updated test object
    """
    # Mutates and returns the same dict; 'Test failed' is the default reason.
    test['status'] = 'FAILURE'
    test['message'] = 'Test failed' if msg is None else msg
    return test
def claims(oauth2_settings):
    """Set up web tokens claims options

    Accepts:
        oauth2_settings(dict): dictionary of OAuth2 settings

    Returns:
        claims(dict): a dictionary describing json token web claims
    """
    issuers = ",".join(oauth2_settings.get("issuers", []))
    audience = ",".join(oauth2_settings.get("audience", []))
    return {
        "iss": {"essential": True, "values": issuers},
        "aud": {
            # Audience is only checked when the settings ask for it.
            "essential": oauth2_settings.get("verify_aud", False),
            "values": audience,
        },
        "exp": {"essential": True},
    }
def transform_job_describe(sm_client, transform_job_name):
    """API call to describe a batch-transform inference job.

    Args:
        sm_client: boto3 SageMaker client.
        transform_job_name: name of the transform job to describe.

    Returns:
        The DescribeTransformJob response dict.

    Raises:
        Exception: if the job does not exist (ResourceNotFound is re-raised
            as a generic Exception with a clearer message; callers appear to
            rely on this broad type).
    """
    try:
        response = sm_client.describe_transform_job(TransformJobName=transform_job_name)
        return response
    except sm_client.exceptions.ResourceNotFound:
        raise Exception(f"Transform job not found: '{transform_job_name}'")
def numval(token):
    """Return the numerical value of token.value if it is a number"""
    # Dispatch table: numeric token types map to their converters;
    # any other type passes the raw value through unchanged.
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    if convert is None:
        return token.value
    return convert(token.value)
import textwrap
def _center_wrap(text: str, cwidth: int = 80, **kw) -> str:
"""Centers a text.
Args:
text (str): Text to center.
cwidth (int): Wanted width. Defaults to 80.
**kw: Arguments of textwrap.wrap
Returns:
str: Centered text.
"""
lines = textwrap.wrap(text, **kw)
return "\n".join(line.center(cwidth) for line in lines) | 5dad799732cebbcd3061b711b34e367e46f0d4b0 | 692,326 |
def is_even(n):
    """
    Determines if the given integer or float input is even.

    :param n: a float or integer value.
    :return: True if the value is even, otherwise False.
    """
    remainder = n % 2
    return remainder == 0
def define_field(key: str, pattern: str = r'[^\s\'"]+') -> str:
    r"""Gives a regex pattern for a key/value with a named group for value; 'pattern'
    is the regex for the value. The value may optionally be single or double quoted.

    E.g. key='map', pattern='\w+' will match "map 'e'" and groupdict()['map']='e'
    """
    # Shape:  ^key (whitespace | ':'/'=' separator) [opening quote] value [closing quote]$
    # The lookaheads `(?="...")` / `(?='...')` only consume an opening quote
    # when a matching closing quote follows the value, so unquoted values
    # still match.  Note: `key` is interpolated into both the pattern and the
    # group name, so it must be a valid regex AND a valid group identifier.
    return rf"""^{key}(?:\s+|\s*[:=]\s*)(?:"(?={pattern}")|'(?={pattern}'))?(?P<{key}>{pattern})['"]?$"""
def true_positive(a, b):
    """ Return quantity of TP - True Positives

    What is in A and B
    being A the set of Positive prediction
    and B the set of Actual Positive """
    # Count each predicted-positive item that also appears in the actuals.
    return sum(1 for item in a if item in b)
def hwc2chw(image):
    """
    Changes the order of image pixels from Height-Width-Color to Color-Height-Width

    Parameters
    -------
    image : numpy.ndarray
        Image with pixels in Height-Width-Color order

    Returns
    -------
    image : numpy.ndarray
        Image with pixels in Color-Height-Width order
    """
    # Move the channel axis (2) to the front; H and W keep their order.
    channel_first_axes = (2, 0, 1)
    return image.transpose(channel_first_axes)
def dict_deep_overlay(defaults, params):
    """If defaults and params are both dictionaries, perform deep overlay (use params value for
    keys defined in params), otherwise use defaults value

    Note: mutates and returns `defaults` when both arguments are dicts.
    """
    # Guard clause: anything other than dict-on-dict means params wins outright.
    if not (isinstance(defaults, dict) and isinstance(params, dict)):
        return params
    for key, value in params.items():
        defaults[key] = dict_deep_overlay(defaults.get(key, None), value)
    return defaults
def get_normalized_distance(pair, s_pronoun):
    """Normalized distance: if the antecedent and the mention are the same sentence normalize, otherwise 0

    Args:
        pair: mention/antecedent pair.  # NOTE(review): inferred from the
            indexing below -- pair[0]/pair[1] are token tuples whose entry at
            index 2 is a (string) token position, and pair[2]/pair[3] are
            sentence identifiers.  Confirm against the caller.
        s_pronoun: the sentence containing the pronoun; its length is the
            normalization factor.

    Returns:
        float rounded to 3 decimals when both mentions share a sentence,
        otherwise 0.
    """
    if pair[2] == pair[3]:
        distance = abs(int(pair[1][2]) - int(pair[0][2]))
        return round(distance / len(s_pronoun), 3)
    return 0
def _split_name(name):
"""Splits a name in two components divided by '.'"""
comp = name.split('.')
if len(comp) > 1:
return (comp[0], '.'.join(comp[1:]))
return (None, name) | 2c4e771a18325dfd518b2f174567adabc773685c | 692,339 |
def findTop(node):
    """
    _findTop_

    Ignoring tree structure, find the top node that contains the node
    provided.
    Will work for any ConfigSection, not limited to ConfigSectionTree
    """
    # Identity check (`is None`): `== None` can be fooled by custom __eq__
    # implementations on config-section classes.
    if node._internal_parent_ref is None:
        return node
    # Recurse up the parent chain until the root is reached.
    return findTop(node._internal_parent_ref)
def _collect_generated_proto_go_sources(target):
    """Returns a depset of proto go source files generated by this target.

    NOTE(review): this reads as Bazel Starlark (an aspect helper), not runtime
    Python -- `target` is a Target carrying an `aspect_proto_go_api_info`
    provider.  Despite the docstring, a plain list (not a depset) is returned;
    confirm whether callers expect a depset.
    """
    if not hasattr(target, "aspect_proto_go_api_info"):
        # Target was not processed by the proto-go aspect.
        return None
    go_proto_info = target.aspect_proto_go_api_info
    files = getattr(go_proto_info, "files_to_build", [])
    # Keep only the generated Go protobuf sources.
    return [f for f in files if f.basename.endswith(".pb.go")]
def abbreviate_path(path):
    """Abbreviate path (replace with first letters of parts)."""
    initials = [part[0] for part in path.parts]
    return ''.join(initials)
def sum_values(*values):
    """Return the sum of values, considering only elements that are not None.

    An item v,w in values can be anything that contains __add__ function
    such that v+0 and v+w is defined.
    """
    # Accumulate as `v + current` (not current + v) so types that only
    # implement __add__ against 0 and each other still work.
    total = 0
    for value in values:
        if value is None:
            continue
        total = value + total
    return total
def isfunction(f):
    """
    Tests if argument is a function

    NOTE: actually returns True for ANY callable -- lambdas, bound methods,
    classes, and objects defining __call__ -- not only plain functions.
    """
    return callable(f)
import random
def mutate(gene, threshold):
    """ mutate gene

    Arguments:
    ----------
    gene {list[int]} -- gene
    threshold {float} -- threshold for mutating: a HIGHER threshold means a
        mutation is LESS likely (mutation happens when random() >= threshold)

    Returns:
    --------
    new_gene {list[int]} -- new gene (a copy when mutated; the SAME list
        object when not mutated)

    Examples:
    ---------
    >>> gene = [4, 3, 2, 1, 0]
    >>> for _ in range(5):
    ...     new_gene = mutate(gene, 0.5)
    ...     print(new_gene)
    [0, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    [2, 3, 2, 1, 0]
    [4, 0, 2, 1, 0]
    >>> for _ in range(5):
    ...     new_gene = mutate(gene, 0.1)  # change threshold to 0.1
    ...     print(new_gene)
    [4, 2, 2, 1, 0]
    [4, 3, 0, 1, 0]
    [4, 1, 2, 1, 0]
    [4, 0, 2, 1, 0]
    [2, 3, 2, 1, 0]
    >>> for _ in range(5):
    ...     new_gene = mutate(gene, 0.9)  # change threshold to 0.9
    ...     print(new_gene)
    [2, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    [4, 3, 2, 1, 0]
    """
    mutate_prob = random.random()
    if mutate_prob < threshold:
        # Below the threshold: no mutation, return the original list object.
        new_gene = gene
    else:
        length = len(gene)
        # Pick a position to mutate; the last element is never chosen.
        mutate_pos = random.randint(0, length-2)
        new_gene = gene.copy()
        while True:
            # Re-draw until the value actually changes.  The bound
            # length-mutate_pos-1 suggests gene[i] <= len(gene)-i-1 is the
            # intended invariant (as in the examples) -- confirm with callers.
            new_value = random.randint(0, length-mutate_pos-1)
            if not new_value == gene[mutate_pos]:
                new_gene[mutate_pos] = new_value
                break
    return new_gene
def getSQLT(timestamp):
    """Make timestamp for SQLite from Python timestamp, meaning a UNIX epoch INTEGER.

    :param timestamp: POSIX timestamp (e.g. from time.time())
    :return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER"""
    # Deliberately tiny: it documents exactly what the SQL layer expects.
    return int(timestamp)
import logging
def get_logger(logger_name, propagate=True):
    """
    This function provides a simple wrapper to add a null handler to the logger
    requested so that we make sure not to dump stuff to terminal as defined by
    default configuration in case the library user do not want to use logging
    (or didn't care about configuring it).

    Args:
        logger_name (str): the logger instance name (usually the module name
            with __name__)
        propagate (bool): whether to propagate the messages up to ancestor
            loggers

    Returns:
        logging.Logger: Logger instance

    Raises:
        None
    """
    # getLogger creates the instance on first use and returns it thereafter.
    logger = logging.getLogger(logger_name)
    # NullHandler suppresses "no handlers could be found" noise when the
    # library user has not configured logging at all.
    logger.addHandler(logging.NullHandler())
    logger.propagate = propagate
    return logger
import re
def get_slide_id(url):
    """
    Extract the slide id from a Google Slides URL, e.g.

    https://docs.google.com/presentation/d/1N8YWE7ShqmhQphT6L29-AcEKZfZg2QripM4L0AK8mSU/edit#slide=id.g4c7fe486b7_0_0
    https://docs.google.com/presentation/d/1N8YWE7ShqmhQphT6L29-AcEKZfZg2QripM4L0AK8mSU/edit#slide=id.g4f00846b3a_0_0
    https://docs.google.com/presentation/d/1N8YWE7ShqmhQphT6L29-AcEKZfZg2QripM4L0AK8mSU/edit

    Returns None when the URL carries no slide fragment.
    """
    match = re.search("edit#slide=id.(.+?)$", url)
    if match is None:
        return None
    return match.group(1)
import warnings
def _determine_cy_weighting_func(weighting_function):
""" Determine cython weight function value. """
if weighting_function.upper() == 'GRIDRAD':
cy_weighting_function = 4
elif weighting_function.upper() == 'BARNES2':
cy_weighting_function = 3
elif weighting_function.upper() == 'NEAREST':
cy_weighting_function = 2
elif weighting_function.upper() == 'CRESSMAN':
cy_weighting_function = 1
elif weighting_function.upper() == 'BARNES':
warnings.warn("Barnes weighting function is deprecated."
" Please use Barnes 2 to be consistent with"
" Pauley and Wu 1990.", DeprecationWarning)
cy_weighting_function = 0
else:
raise ValueError('unknown weighting_function')
return cy_weighting_function | 124f47519402cb07e1d0efc600802d22fd4729c7 | 692,363 |
def _get_val(row, input_arr):
"""Small function for extracting values from array based on index.
"""
i, j, k = row
return input_arr[i, j, k] | 190fee22f78be9f7d8f26bee7c783a2b225a7513 | 692,377 |
def get_enz_remnant(enz):
    """Get enzyme recognition site remnant sequence

    Args:
        enz: a restriction-enzyme object exposing `site` (recognition
            sequence), `ovhg` (overhang length) and `fst5`/`fst3` (cut
            positions).  # NOTE(review): attribute names match the Biopython
            Restriction interface -- confirm.

    Returns:
        list[str]: the remnant and a base-substituted variant (G->A for
        positive overhangs, C->T otherwise -- presumably a bisulfite-style
        conversion; verify with the caller).
    """
    if enz.ovhg > 0:
        remnant = enz.site[enz.fst3:]
        return [remnant,remnant.replace('G','A')]
    else:
        remnant = enz.site[enz.fst5:]
        return [remnant,remnant.replace('C','T')]
def get_dimorder(dimstring):
    """Get the order of dimensions from dimension string

    :param dimstring: string containing the dimensions
    :type dimstring: str
    :return: dims_dict - dictionary with the dimensions and its positions
    :rtype: dict
    :return: dimindex_list - list with indices of dimensions
    :rtype: list
    :return: numvalid_dims - number of valid dimensions
    :rtype: integer
    """
    dimindex_list = []
    dims = ['R', 'I', 'M', 'H', 'V', 'B', 'S', 'T', 'C', 'Z', 'Y', 'X', '0']
    dims_dict = {}
    for d in dims:
        # str.find returns -1 when the dimension letter is absent.
        position = dimstring.find(d)
        dims_dict[d] = position
        dimindex_list.append(position)
    # BUG FIX: use >= 0 so a dimension sitting at index 0 counts as valid;
    # the original `i > 0` silently dropped the first dimension.
    numvalid_dims = sum(i >= 0 for i in dimindex_list)
    return dims_dict, dimindex_list, numvalid_dims
def apply_ants_transform_to_vector(transform, vector):
    """
    Apply transform to a vector

    ANTsR function: `applyAntsrTransformToVector`

    Arguments
    ---------
    transform : ANTsTransform
        transform to apply
    vector : list/tuple
        vector to which the transform will be applied

    Returns
    -------
    tuple : transformed vector
    """
    # Thin wrapper: delegates to the transform object's own method.
    return transform.apply_to_vector(vector)
def product(numbers):
    """Calculates the product of a series of numbers.

    Args:
        numbers: iterable of numbers.

    Returns:
        The product of all elements.

    Raises:
        ValueError: if `numbers` is empty.
    """
    # BUG FIX: the original tested `if not product:` -- the function object
    # itself, which is always truthy -- so the empty-sequence guard never
    # fired.  Track emptiness while iterating (works for generators too).
    result = 1
    empty = True
    for n in numbers:
        empty = False
        result *= n
    if empty:
        raise ValueError("product of empty sequence")
    return result
from typing import List
from typing import Dict
def flatten_dicts(dicts: List[Dict]) -> Dict:
    """Flatten a list of dicts

    Args:
        dicts (list): list of dicts

    Returns:
        dict: flattened dict mapping each key to the list of all its values,
        in encounter order
    """
    aggregated: Dict = {}
    for mapping in dicts:
        for key, value in mapping.items():
            aggregated.setdefault(key, []).append(value)
    return aggregated
def readFile(filename, split=False):
    """ Read the file and return it as a string

    Parameters
    ----------
    @param filename - path to the file
    @param split - whether to split at newline or not, default False

    Returns
    ----------
    @param s - either a string (if split==False) or a list of strings (if split==True)
        representing the entire file or its subsequent lines, respectively
    """
    # Read in one call instead of quadratic `s += line` concatenation.
    # The `with` block closes the file; the original also called f.close()
    # redundantly inside the `with`.
    with open(filename, 'r') as f:
        s = f.read()
    if split:
        s = s.split("\n")
    return s
def selector(expression):
    """If the expression is true, return the string 'selected'.

    Useful for HTML <option>s.
    """
    return "selected" if expression else None
def parse_fp(file):
    """parse .fp files. returns a 3D array (nested lists):
    year x stock x fishery.
    The original base.fp file, for instance, returns a 39x30x25 array."""
    # Normalize line endings, then split on blank lines into 2-D slices.
    text = file.read().strip().replace("\r", "")
    result = []
    for block in text.split("\n\n"):
        rows = [[float(token) for token in line.split()]
                for line in block.splitlines()]
        result.append(rows)
    return result
from functools import reduce
def excel_col_letter_to_index(x):
    """
    Convert a 'AH','C', etc. style Excel column reference to its integer
    equivalent.

    @param x (str) The letter style Excel column reference.
    @return (int) The integer version of the column reference.
    """
    # Base-26 accumulation with digits A=1 .. Z=26 (explicit loop in place
    # of the original reduce()).
    total = 0
    for letter in x:
        total = total * 26 + (ord(letter) - ord('A') + 1)
    return total
def _max(*args):
    """Returns the maximum value.

    Thin wrapper over builtin max(): with a single iterable argument it
    behaves like max(iterable), otherwise like max(a, b, ...).
    """
    return max(*args)
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values

    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): list of average red, green, blue values across pixels respectively

    Assumes you are returning in the order: [red, green, blue]
    """
    count = len(pixels)
    red_total = sum(p.red for p in pixels)
    green_total = sum(p.green for p in pixels)
    blue_total = sum(p.blue for p in pixels)
    # Integer (floor) division, matching pixel channel values.
    return [red_total // count, green_total // count, blue_total // count]
def distribute_atoms(atoms, n):
    """ split a 1D list atoms into n nearly-even-sized chunks.
    """
    # The first `extra` chunks get one additional element each.
    base, extra = divmod(len(atoms), n)
    chunks = []
    start = 0
    for i in range(n):
        size = base + (1 if i < extra else 0)
        chunks.append(atoms[start:start + size])
        start += size
    return chunks
def get_perm_indices_path(data_dir, data_fn):
    """Get path of pickled perm_indices file."""
    return f'{data_dir}/{data_fn}_perm_indices.pkl'
def bytes2MB(bytes):
    """
    Convert bytes to MB.

    :param bytes: number of bytes
    :type bytes: int
    :return: megabytes (decimal MB, 1 MB = 1,000,000 bytes -- NOT MiB)
    :rtype: float
    """
    # Note: the parameter name shadows the builtin `bytes`; kept for
    # interface compatibility.
    return bytes/(1000*1000)
import torch
def collate_fn_customer(batch):
    """
    Merge the multiple samples read into a batch into one whole,
    i.e. add a leading batch dimension.
    """
    images = []
    bboxes = []
    for i, data in enumerate(batch):
        # data[0] is the image
        images.append(data[0])
        # data[1] is the bbox
        bboxes.append(data[1])
    # images type conversion: list ==> torch.Tensor (stacked along dim 0)
    images = torch.stack(images)
    batch = (images, bboxes)
    return batch
def get_course_info_usage_key(course, section_key):
    """
    Returns the usage key for the specified section's course info module.

    Args:
        course: course descriptor whose `id` exposes make_usage_key()
            (an Open edX CourseKey).  # NOTE(review): inferred -- confirm.
        section_key: str name of the course-info section (e.g. "updates").
    """
    return course.id.make_usage_key('course_info', section_key)
import io
def optimize_lossless_webp(image):
    """Encode image to lossless WEBP using Pillow.

    :param PIL.Image image: The image to encode.
    :returns: The encoded image's bytes.
    """
    buffer = io.BytesIO()
    # method=6 is the slowest/highest-compression encoder setting.
    save_options = dict(format="WEBP", lossless=True, quality=100, method=6)
    image.save(buffer, **save_options)
    # getvalue() returns the full buffer without needing seek(0)+read().
    return buffer.getvalue()
def degree_to_n_coeffs(degree):
    """How many coefficients has a 2d polynomial of given degree.

    A bivariate polynomial of total degree d has (d+1)(d+2)/2 monomials.
    """
    # (degree+1)*(degree+2) is always even, so integer division is exact;
    # this replaces the original float computation with its `+0.5` rounding
    # hack, which could misbehave for large degrees.
    return (degree + 1) * (degree + 2) // 2
from typing import Tuple
import colorsys
def get_rgb_from_value(v: float) -> Tuple[int, int, int]:
    """Returns a 3-tuple of rgb values based on the input float.

    The input float should be between 0 and 1 and is interpreted as the
    hue value in an HSL to RGB color conversion.
    """
    # colorsys yields channel values in [0, 1]; lightness 0.5 and
    # saturation 1 give fully saturated colors.
    channels = colorsys.hls_to_rgb(v, 0.5, 1)
    # Scale each channel to 0-255.
    red, green, blue = (round(c * 255) for c in channels)
    return red, green, blue
def __get_type_NHC(code):
    """
    Map a National Hurricane Center system-status code to an intensity
    category.

    Reference:
        https://www.nhc.noaa.gov/data/hurdat/hurdat2-format-nov2019.pdf

    Category legend (per the HURDAT2 format):
        0 - Subtropical cyclone of depression intensity;
            subtropical cyclone of storm intensity;
            a low that is neither a TC, a subtropical cyclone, nor an EC;
            tropical wave; disturbance (OTHERS, unknown intensity)
        1 - Tropical depression (TD, <34 knots)
        2 - Tropical storm (TS, 34-63 knots)
        3 - Hurricane (HU, >64 knots)
        4 - Extratropical cyclone (EC, any intensity)

    Parameters
    ----------
    code : str
        A string code that represents the type.

    Returns
    -------
    str
        NOTE(review): the body currently returns *code* unchanged, not one
        of ['TD', 'TS', 'HU', 'EC', 'OTHERS'] as previously documented --
        confirm whether a mapping step is missing or whether the input is
        already the desired label.
    """
    return code
import hashlib
def get_file_hash(filename):
    """Return the hex SHA-256 digest of the file at *filename*.

    The file is read in fixed-size chunks so arbitrarily large files can
    be hashed without loading them fully into memory, and it is opened
    with a context manager so the handle is always closed (the previous
    version leaked the file object).

    :param filename: path of an existing, readable file
    :return: lowercase hex digest string (64 characters)
    :raises OSError: if the file cannot be opened or read
    """
    READ_SIZE = 8192 * 4
    hash_func = hashlib.sha256()
    with open(filename, 'rb') as srcfile:
        while True:
            buf = srcfile.read(READ_SIZE)
            if not buf:
                break
            hash_func.update(buf)
    return hash_func.hexdigest()
def r(b, p, alpha):
    """
    Return the r coefficient of the Massman frequency correction.
    """
    # Precompute the two powers that appear in every factor.
    ba = b ** alpha
    pa = p ** alpha
    return (ba / (ba + 1)) * (ba / (ba + pa)) * (1 / (pa + 1))
def delete_zero_amount_exchanges(data, drop_types=None):
    """Remove zero-amount exchanges from every dataset in *data*.

    ``drop_types`` is an optional list of strings restricting which
    exchange types may be dropped; by default all types are eligible.
    The datasets are modified in place and the list is returned.
    """
    for ds in data:
        if drop_types:
            # Keep an exchange if its type is protected or its amount is truthy.
            kept = [exc for exc in ds["exchanges"]
                    if exc["type"] not in drop_types or exc["amount"]]
        else:
            kept = [exc for exc in ds["exchanges"] if exc["amount"]]
        ds["exchanges"] = kept
    return data
def line_from_two_points(x1, y1, x2, y2):
    """
    Return the equation of the line through two points.

    :Parameters:
        x1, y1: float
            Coordinates of the first point.
        x2, y2: float
            Coordinates of the second point.

    :Returns:
        ``(slope, intercept)`` for a non-vertical line, or
        ``(None, x1)`` when the slope is infinite (vertical line).
    """
    run = x2 - x1
    if abs(run) > 0.0:
        slope = (y2 - y1) / run
        return (slope, y1 - slope * x1)
    # Vertical line: report its x position instead of an intercept.
    return (None, x1)
def extract_objective(objective_field):
    """Extract the objective field id from the model structure.

    A list is unwrapped to its first element; anything else is returned
    as-is.
    """
    return objective_field[0] if isinstance(objective_field, list) else objective_field
from typing import List
from typing import Dict
def make_property_dict(key: str, data: List) -> Dict:
    """Organize real estate data for the debtor in 106 A/B.

    :param key: The ID for the property in the form
    :param data: Extracted content from real estate section (9 fields, plus
        an optional 10th element holding the property id)
    :return: Organized property information
    """
    field_names = (
        "address", "city", "state", "zip", "property_value",
        "your_property_value", "other", "ownership_interest", "county",
    )
    result = {"key": key}
    # Pair the first nine data fields with their names positionally.
    result.update(zip(field_names, data))
    # The property id is only present when exactly 10 fields were extracted.
    result["property_id"] = data[9] if len(data) == 10 else ""
    return result
def re_run_game() -> bool:
    """Ask the player for another round; True iff the answer starts with 'y'/'Y'."""
    answer = input("Would you like to play another game 'Y' or 'N'? ")
    return answer.lower().startswith("y")
from typing import Tuple
def tokenize_version(version_string: str) -> Tuple[int, int, int]:
    """Tokenize a version string to a tuple.

    Truncates qualifiers like ``-dev``.

    :param version_string: A version string
    :return: A tuple representing the version string

    >>> tokenize_version('0.1.2-dev')
    (0, 1, 2)
    """
    core = version_string.partition('-')[0]
    # Take only the first three dotted fields, in case of extensions
    # such as '-dev.0'.
    major, minor, patch = (int(part) for part in core.split('.')[:3])
    return major, minor, patch
def MVP_T2D(x, t, p):
    """
    Right-hand side of the 4-compartment Kandarian model modified for T2D
    (Aradottir et al. 2019), for use with an ODE solver.

    Arguments:
        x : state vector [I_s, I_p, I_e, G]
            I_s - subcutaneous insulin [U/day]
            I_p - plasma insulin [U/day]
            I_e - insulin effect on glucose [U/day]
            G   - plasma glucose concentration [mmol/L]
        t : time [day] (unused; kept for ODE-solver call compatibility)
        p : parameter vector [u, SI, pEGP, B, tau1, p2, pGEZI]
            u     - exogenous insulin input [U/day]
            SI    - insulin sensitivity [1/U]
            pEGP  - rate of endogenous glucose production [mmol/L day]
            B     - endogenous insulin production coefficient beta [U L/mmol day]
            tau1  - time constant [day]
            p2    - delay in insulin action [1/day]
            pGEZI - rate of glucose elimination from plasma [1/day]

    Returns:
        list of the four state derivatives [dI_s, dI_p, dI_e, dG]
    """
    I_s, I_p, I_e, G = x
    u, SI, pEGP, B, tau1, p2, pGEZI = p
    dI_s = (u - I_s) / tau1
    dI_p = (I_s - I_p) / tau1
    dI_e = p2 * (I_p + B * G) - p2 * I_e
    dG = -(pGEZI + SI * I_e) * G + pEGP
    return [dI_s, dI_p, dI_e, dG]
def enumerate_keyed_param(param, values):
    """
    Flatten a list of per-item dicts into keyed, enumerated request params.

    Each dict in ``values`` describes one item; item ``i`` (1-based) with
    field ``k`` becomes the flat key ``"<param>.<i>.<k>"``.

    Example:
        param = "InboundShipmentPlanRequestItems.member"
        values = [
            {'SellerSKU': 'Football2415', 'Quantity': 3},
            {'SellerSKU': 'TeeballBall3251', 'Quantity': 5},
        ]

        Returns:
        {
            'InboundShipmentPlanRequestItems.member.1.SellerSKU': 'Football2415',
            'InboundShipmentPlanRequestItems.member.1.Quantity': 3,
            'InboundShipmentPlanRequestItems.member.2.SellerSKU': 'TeeballBall3251',
            'InboundShipmentPlanRequestItems.member.2.Quantity': 5,
        }
    """
    if not values:
        # Nothing to enumerate.
        return {}
    if not param.endswith('.'):
        # The enumerated param must end in '.' before the index is appended.
        param += '.'
    if not isinstance(values, (list, tuple, set)):
        # A single dict is treated as a one-item collection.
        values = [values, ]
    # Validate up front: every entry must be a dict.
    if any(not isinstance(entry, dict) for entry in values):
        raise ValueError((
            "Non-dict value detected. "
            "`values` must be a list, tuple, or set; containing only dicts."
        ))
    params = {}
    for idx, entry in enumerate(values, start=1):
        for field, value in entry.items():
            params['{param}{idx}.{key}'.format(param=param, idx=idx, key=field)] = value
    return params
def error_in_query(q, task, log):
    """Check the last executed query for an error.

    Call after every ``q.exec_`` to check for errors; logs the error and
    the problematic query.

    :returns: a message suitable for a QMessageBox when an error was
        found, ``False`` otherwise.
    """
    err = q.lastError()
    if not err.isValid():
        return False
    msg = "An error occurred while {}:".format(task)
    log.error(msg)
    log.error('FAILED QUERY: "{}"'.format(q.lastQuery()))
    return msg + "\n\n{}".format(err.text())
import six
def let(__context__, *args, **kwargs):
    """:yaql:let

    Returns context object where args are stored with 1-based indexes
    and kwargs values are stored with appropriate keys.

    :signature: let([args], {kwargs})
    :arg [args]: values to be stored under appropriate numbers $1, $2, ...
    :argType [args]: chain of any values
    :arg {kwargs}: values to be stored under appropriate keys
    :argType {kwargs}: chain of mappings
    :returnType: context object

    .. code::

        yaql> let(1, 2, a => 3, b => 4) -> $1 + $a + $2 + $b
        10
    """
    # Positional values are exposed as "1", "2", ... (1-based).
    for index, value in enumerate(args, 1):
        __context__[str(index)] = value
    # Python 3: dict.items() replaces the former six.iteritems() call.
    for key, value in kwargs.items():
        __context__[key] = value
    return __context__
def outlier_removal_null(dataframe, colname, low_cut, high_cut):
    """Replace outliers with empty values on dataframe[colname].

    A cell is an outlier when it is numeric (int or float) and lies
    strictly outside [low_cut, high_cut]. The dataframe is modified in
    place and also returned.
    """
    def _is_outlier(value):
        return isinstance(value, (int, float)) and (value < low_cut or value > high_cut)

    mask = dataframe[colname].apply(_is_outlier)
    dataframe.loc[mask, colname] = None
    return dataframe
def get_date_shortcode(date_str):
    """
    Get shortcode for the standard date strings, to use in submodel names.

    Unrecognized strings are returned unchanged.
    """
    shortcodes = {
        "std_contest": "SC",
        "std_contest_daily": "SCD",
        "std_future": "SF",
        "std_test": "ST",
        "std_val": "SV",
        "std_contest_eval": "SCE",
        "std_contest_eval_daily": "SCED",
    }
    return shortcodes.get(date_str, date_str)
import requests
def check_ssl(url):
    """Check if the ssl certificate is valid."""
    try:
        requests.get(url, verify=True, timeout=3)
    except Exception:
        # Any failure (SSL error, timeout, DNS, ...) counts as invalid.
        return False
    return True
def cal_confidence(antecedents_support, combination_support):
    """
    Calculate the confidence of a rule from its supports.

    Parameters
    ----------
    antecedents_support : float
        Support of the antecedents, e.g. 0.43.
    combination_support : float
        Support of the full combination, e.g. 0.35.

    Returns
    -------
    float
        ``combination_support / antecedents_support`` rounded to three
        decimals, e.g. 0.35 / 0.43 -> 0.814.

    Raises
    ------
    ValueError
        If ``antecedents_support`` is zero.
    """
    try:
        ratio = combination_support / antecedents_support
    except ZeroDivisionError:
        raise ValueError("antecedents support supposed not be zero !")
    return round(ratio, 3)
def multi_pMethod(args):
    """
    Run the pMethod function and return its results prefixed with the
    id of the node.
    """
    node_id, method, dataset1, dataset2 = args
    (worst_pvalue, best_pvalue, worst_sim_score, best_sim_score,
     worst_rep_1, worst_rep_2, best_rep_1, best_rep_2) = method(dataset1, dataset2)
    return (node_id, worst_pvalue, best_pvalue, worst_sim_score, best_sim_score,
            worst_rep_1, worst_rep_2, best_rep_1, best_rep_2)
def get_hemisphere(lat):
    """For a given latitude, return N or S (the equator counts as N)."""
    return 'S' if lat < 0.0 else 'N'
from typing import List
import re
def purify(lines: List[str]) -> List[str]:
    """Collapse runs of whitespace to single spaces and drop blank lines."""
    cleaned = (re.sub(r'\s+', ' ', line).strip() for line in lines)
    return [line for line in cleaned if line]
import hashlib
from datetime import datetime
def get_unique_job_id() -> str:
    """
    Return a 64 hex-character (lowercase) string derived from the current
    UTC timestamp,
    e.g. 'e2cddf55dc410ec584d647157388e96f22bf7b60d900e79afd1c56e27aa0e417'.

    NOTE: TxJob is no longer used, so ids are not checked for duplicates
    against a job store (the preconvert bucket could theoretically be
    checked, since job_id.zip is saved there); uniqueness relies on the
    microsecond timestamp.

    :return string:
    """
    timestamp = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')
    return hashlib.sha256(timestamp.encode('utf-8')).hexdigest()
def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
    """Recursively sort a protein group by a sequence of sorting functions.

    Each sort function splits a group into ordered subgroups; any
    subgroup that still has more than one member is refined by the next
    function in the sequence, until the functions run out.
    """
    next_index = sortfunc_index + 1
    sorted_out = []
    for subgroup in sortfunctions[sortfunc_index](pgroup):
        if len(subgroup) > 1 and next_index < len(sortfunctions):
            sorted_out.extend(
                sort_protein_group(subgroup, sortfunctions, next_index))
        else:
            sorted_out.extend(subgroup)
    return sorted_out
def unpack_forestryDB(objects):
    """Flatten a Forestry DB nested dict into an easily savable list.

    Args:
        objects (dict): Forestry DB nested dict object

    Returns:
        values (list): property fields, then x/y coordinates, geometry
        type, and the top-level type, in a fixed order
    """
    props = objects['properties']
    geometry = objects['geometry']
    # Property fields come first, in this fixed order.
    values = [props[field] for field in (
        'ig_test', 'ig_date', 'created', 'id',
        'ig_time', 'ig_confidence', 'ig_identity')]
    values.append(geometry['coordinates'][0])
    values.append(geometry['coordinates'][1])
    values.append(geometry['type'])
    values.append(objects['type'])
    return values
def CT_to_class(CT):
    """
    Converts a CT layer from raw sea ice concentrations (0-100) to class
    ids (0-10), in place.

    Concentration 10*i is mapped to class id i; other values are left
    untouched. (Mapping in ascending order is safe: no assigned class id
    can collide with a concentration value processed later.)
    """
    for class_id, concentration in enumerate(range(0, 110, 10)):
        CT[CT == concentration] = class_id
    return CT
from typing import DefaultDict
from typing import Tuple
import copy
from typing import List
def laplace_smooth_cmd_counts(
    seq1_counts: DefaultDict[str, int],
    seq2_counts: DefaultDict[str, DefaultDict[str, int]],
    start_token: str,
    end_token: str,
    unk_token: str,
) -> Tuple[DefaultDict[str, int], DefaultDict[str, DefaultDict[str, int]]]:
    """
    Apply laplace smoothing to the input counts for the cmds.

    In particular, add 1 to each of the counts, including the unk_token.
    By including the unk_token, we can handle unseen commands.

    Parameters
    ----------
    seq1_counts: DefaultDict[str, int]
        individual command counts
    seq2_counts: DefaultDict[str, DefaultDict[str, int]]
        sequence command (length 2) counts
    start_token: str
        dummy command to signify the start of a session (e.g. "##START##")
    end_token: str
        dummy command to signify the end of a session (e.g. "##END##")
    unk_token: str
        dummy command to signify an unseen command (e.g. "##UNK##")

    Returns
    -------
    tuple of laplace smoothed counts:
        individual command counts,
        sequence command (length 2) counts
    """
    # Work on copies so the caller's counts are untouched.
    smoothed1 = copy.deepcopy(seq1_counts)
    smoothed2 = copy.deepcopy(seq2_counts)
    vocab: List[str] = list(smoothed1.keys()) + [unk_token]
    for first in vocab:
        # No transition can start at the end token.
        if first == end_token:
            continue
        for second in vocab:
            # No transition can end at the start token.
            if second == start_token:
                continue
            smoothed1[first] += 1
            smoothed2[first][second] += 1
            smoothed1[second] += 1
    return smoothed1, smoothed2
def fully_qualified_symbol_name(name) -> bool:
    """
    Checks if `name` is a fully qualified symbol name: a string with at
    least one single backtick separator, none doubled, and none at either
    end.
    """
    if not isinstance(name, str):
        return False
    has_separator = "`" in name and "``" not in name
    return has_separator and not name.startswith("`") and not name.endswith("`")
def hex_colour(color: int) -> str:
    """
    Converts an integer representation of a colour to the RGB hex value.

    As we are using a Discord dark theme analogue, black is returned as
    white instead.
    """
    # Only color == 0 formats to "#000000", so special-case it directly.
    if color == 0:
        return "#FFFFFF"
    return f"#{color:0>6X}"
def dict_to_patch_name(patch_image_name_dict):
    """ Usage: patch_name = dict_to_patch_name(patch_image_name_dict)
        convert the dictionary into a file name string

    Builds "<case_id>_<location_x>_<location_y>_<class_label><file_ext>"
    (locations are formatted as integers).

    Args:
        patch_image_name_dict: {'case_id': 'd83cc7d1c94',
                                'location_x': 100,
                                'location_y': 340,
                                'class_label': 'dermis',
                                'file_ext': '.jpg' }
            (note: the key read is 'file_ext' -- the previous docstring
            incorrectly documented it as 'file_type')

    Returns:
        patch_name: file name (without directory path)

    Side effect:
        Normalizes 'file_ext' in place: a missing leading '.' is prepended
        inside the caller's dict.
    """
    # Prepend '.' to the extension in place if it is missing one.
    if len(patch_image_name_dict['file_ext']) > 1 and patch_image_name_dict['file_ext'][0] != '.':
        patch_image_name_dict['file_ext'] = '.' + patch_image_name_dict['file_ext']
    patch_name = patch_image_name_dict['case_id']
    patch_name += '_%i'%patch_image_name_dict['location_x']
    patch_name += '_%i'%patch_image_name_dict['location_y']
    patch_name += '_%s'%patch_image_name_dict['class_label']
    patch_name += '%s'%patch_image_name_dict['file_ext']
    return patch_name
def ieee_1789_2015(frequency:float, percent_flicker:float) -> str:
    """Tests for compliance with IEEE 1789-2015

    Refer to 8.1.1 Simple recommended practices in IEEE 1789-2015 for rule
    definitions.

    Fix: below 90 Hz the low-frequency limits are now terminal. Previously
    a flicker that exceeded both <90 Hz limits fell through to the
    90 Hz-3 kHz rules and could be misclassified (e.g. 80 Hz at 2.5%
    returned "No Risk" although it exceeded the <90 Hz low-risk limit of
    0.025 * f = 2.0%).

    Parameters
    ----------
    frequency : float
        The flicker frequency in Hertz
    percent_flicker : float
        The flicker percentage

    Returns
    -------
    str
        Either of: "No Risk", "Low Risk", "High Risk"
    """
    if frequency > 3000:
        return "No Risk"
    if frequency < 90:
        if percent_flicker < 0.01 * frequency:
            return "No Risk"
        if percent_flicker < 0.025 * frequency:
            return "Low Risk"
        return "High Risk"
    # Other flicker, 90 Hz <= f <= 3 kHz
    if percent_flicker < 0.0333 * frequency:
        return "No Risk"
    if frequency <= 1250:
        if percent_flicker < 0.08 * frequency:
            return "Low Risk"
    return "High Risk"
def mult(x,y):
    """
    Take two numbers x and y and return their product.

    Fix: the previous repeated-addition loop iterated ``range(x)``, which
    is empty for negative x, so e.g. mult(-2, 3) wrongly returned 0.
    Using the multiplication operator is correct for all integers (and
    generalizes to floats) while preserving results for non-negative
    inputs.
    """
    return x * y
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.