content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def posIntInput(string):
    """Prompt repeatedly until the user enters a positive integer.

    Arguments:
        string {str} -- prompt to display while taking input from user

    Returns:
        int -- the first positive integer entered
    """
    # The original used two nested `while True` loops with breaks in both;
    # a single loop with an early return is equivalent and clearer.
    while True:
        try:
            value = int(input(string))
            if value > 0:
                return value
            print("Please enter a positive integer.\n")
        except ValueError:
            print("\nError: That is not an integer")
            print("Press Enter to input the value again.\n")
def flip_ctrlpts_u(ctrlpts, size_u, size_v):
    """ Flips a list of 1-dimensional control points from u-row order to v-row order.
    **u-row order**: each row corresponds to a list of u values (in 2-dimensions, an array of [v][u])
    **v-row order**: each row corresponds to a list of v values (in 2-dimensions, an array of [u][v])
    :param ctrlpts: control points in u-row order
    :type ctrlpts: list, tuple
    :param size_u: size in u-direction
    :type size_u: int
    :param size_v: size in v-direction
    :type size_v: int
    :return: control points in v-row order
    :rtype: list
    """
    # Walk u as the outer index and v as the inner one, converting each
    # coordinate to float along the way.
    return [
        [float(coord) for coord in ctrlpts[u + (v * size_u)]]
        for u in range(size_u)
        for v in range(size_v)
    ]
def divup(x: int, y: int) -> int:
    """Divide x by y and round the result upwards (ceiling division)."""
    # Adding y - 1 before floor division rounds any nonzero remainder up.
    numerator = x + y - 1
    return numerator // y
def create_test_results(suite_name, test_name, timestamp, command_args_printable, rc):
    """
    Create a minimal test results object for test cases that did not produce their own
    :param suite_name: the name of the subdirectory for the suite this test was run in
    :param test_name: the name of the subdirectory for this test case
    :param timestamp: the timestamp when the test case was run
    :param command_args_printable: the command line args with sensitive args like password obscured
    :param rc: the return of the test (zero for success, non-zero for fail)
    :return: the results object that can be converted to JSON
    """
    # rc == 0 means the test passed; anything else counts as a failure.
    passed = 1 if rc == 0 else 0
    failed = 1 - passed
    return {
        "ToolName": "Suite: {}, Test case: {}".format(suite_name, test_name),
        "Timestamp": {
            "DateTime": "{:%Y-%m-%dT%H:%M:%SZ}".format(timestamp)
        },
        "CommandLineArgs": command_args_printable,
        "TestResults": {
            test_name: {
                "fail": failed,
                "pass": passed
            }
        },
        "ServiceRoot": {}
    }
import string
import random
def gen_passwd(size: int) -> str:
    """Generate password for ray.init call.
    See
    https://docs.ray.io/en/latest/configure.html?highlight=security#redis-port-authentication
    This function was adapted from https://stackoverflow.com/a/2257449
    Example
    -------
    ray.init(redis_password=gen_passwd(512))
    Parameters
    ----------
    size : int
        how long the password should be
    """
    # Use the OS-backed CSPRNG; the alphabet is letters plus digits.
    # https://stackoverflow.com/a/2257449
    alphabet = string.ascii_letters + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(size))
def compare_extension(filename: str, expected_extension: str):
    """ Return True when *filename* ends with *expected_extension*. """
    suffix_len = len(expected_extension)
    if suffix_len == 0:
        # Every string ends with the empty suffix.
        return True
    return filename[-suffix_len:] == expected_extension
import hashlib
def md5(content):
    """
    Generates md5 hash of content 2019-03-10
    :param content: a data for md5 generation (bytes-like)
    :return: an md5 hash of a content, as a hex string
    """
    # hashlib.md5 accepts the initial data directly; no separate update() needed.
    return hashlib.md5(content).hexdigest()
def aprs_object_name(par):
    """
    Return global name for the aprs object.

    Prefers the stripped "object_name" entry, falling back to "from".
    """
    try:
        return par["object_name"].strip()
    except KeyError:
        return par["from"]
import random
from time import gmtime, strftime
def generate_DOB(age=65):
    """Randomly generate a date of birth 'month/day/year' for a person of *age*.

    The day is drawn uniformly from the valid range of the chosen month
    (February is capped at 28 days, leap-year safe), and the birth year is
    the current system year minus *age*.

    Parameters
    ----------
    age : int, optional
        Age in years used to derive the birth year (default 65).

    Returns
    -------
    str
        Date of birth formatted as 'M/D/YYYY'.
    """
    birth_month = random.randint(1, 12)
    # Bug fix: the original used `birth_month == "1" or "3" or ...`, which is
    # always truthy (and compares an int to strings), so every month got a
    # 1-31 day range -- February 31st was possible.
    if birth_month == 2:
        birth_day = random.randint(1, 28)
    elif birth_month in (1, 3, 5, 7, 8, 10, 12):
        birth_day = random.randint(1, 31)
    else:
        birth_day = random.randint(1, 30)
    year_system = strftime("%Y", gmtime())
    year_from_age = int(year_system) - age
    return "{}/{}/{}".format(birth_month, birth_day, year_from_age)
def nts(s):
    """Convert a null-terminated string field to a python string.
    """
    # partition() returns the text before the first NUL; when no NUL is
    # present the first element is the whole string, matching find() == -1.
    return s.partition("\0")[0]
def parse_csv_data(csv_filename: str) -> list:
    """Parses through a CSV file, reading and storing each line of the file.

    Opens the csv file and, for each line, drops the final character (the
    trailing newline), splits on commas and stores the fields as a tuple.

    Args:
        csv_filename (str): The name of the csv file to be parsed.

    Returns:
        list: one tuple of strings per line of the file.
    """
    covid_csv_data = []
    with open(csv_filename, 'r', encoding='utf8') as covid_csv_file:
        for raw_line in covid_csv_file.readlines():
            # Drop the final character (the newline) and split the fields.
            trimmed = raw_line[:-1]
            covid_csv_data.append(tuple(trimmed.split(",")))
    return covid_csv_data
def get_iscam_mws(intensities, mw_intensity_line_pars=None):
    """
    Calculate the molecular weights of the intensities with the line parameters
    `mw_intensity_line_pars`
    Parameters
    ----------
    intensities : np.ndarray
        inensities of an iscam measurements
    mw_intensity_line_pars : array like
        [slope, intercept] of the mw to intensities linear function.
        Defaults to the identity line [1, 0].
    """
    if mw_intensity_line_pars is None:
        slope, intercept = 1, 0
    else:
        slope, intercept = mw_intensity_line_pars
    # Invert the linear mapping intensity = slope * mw + intercept.
    return (intensities - intercept) / slope
import torch
def torch_dot(x: torch.Tensor, y: torch.Tensor):
    """
    Dot product of two tensors, reduced over the last dimension.
    """
    elementwise = torch.mul(x, y)
    return elementwise.sum(dim=-1)
def _CheckTestDataReadmeUpdated(input_api, output_api):
"""
Checks to make sure the README.md file is updated when changing test files.
"""
test_data_dir = input_api.os_path.join('media', 'test', 'data')
readme_path = input_api.os_path.join('media', 'test', 'data', 'README.md')
test_files = []
readme_updated = False
errors = []
for f in input_api.AffectedFiles():
local_path = f.LocalPath()
if input_api.os_path.dirname(local_path) == test_data_dir:
test_files.append(f)
if local_path == readme_path:
readme_updated = True
break
if test_files and not readme_updated:
errors.append(output_api.PresubmitPromptWarning(
'When updating files in ' + test_data_dir + ', please also update '
+ readme_path + ':', test_files))
return errors | d2332b87137646e9eeda2ecca35b1e4f6c0e3125 | 689,499 |
def format_custom_attr(ddic):
    """
    Format a dictionary of dictionaries in string format in the "custom attribute" syntax
    e.g. custom="readingOrder {index:1;} structure {type:heading;}"
    """
    groups = []
    for group_name, attrs in ddic.items():
        # Each inner mapping becomes "key:value;" pairs separated by spaces.
        body = " ".join("%s:%s;" % (k, v) for k, v in attrs.items())
        groups.append("%s {%s}" % (group_name, body))
    return " ".join(groups)
import re
def mark_quoted_strings(sql):
    """Mark all quoted strings in the SOQL by '@' and get them as params,
    with respect to all escaped backslashes and quotes.

    Returns:
        (str, list): the SQL with every quoted literal replaced by '@', and
        the list of unescaped literal values in order of appearance.
    """
    # A quoted literal: opening ', a run of non-backslash/non-quote chars,
    # optionally interleaved with escape pairs (\\ or \'), then the closing '.
    pm_pattern = re.compile(r"'[^\\']*(?:\\[\\'][^\\']*)*'")
    # An escape sequence inside a literal: backslash followed by \ or '.
    bs_pattern = re.compile(r"\\([\\'])")
    # Characters allowed OUTSIDE the literals; used only for sanity asserts.
    out_pattern = re.compile("^[-!()*+,.:<=>\w\s]*$")
    start = 0
    out = []
    params = []
    for match in pm_pattern.finditer(sql):
        out.append(sql[start:match.start()])
        assert out_pattern.match(sql[start:match.start()])
        # Strip the surrounding quotes and unescape the literal body.
        params.append(bs_pattern.sub('\\1', sql[match.start() + 1:match.end() -1]))
        start = match.end()
    out.append(sql[start:])
    assert out_pattern.match(sql[start:])
    return '@'.join(out), params
def eff_heat_pump(temp_diff, efficiency_intersect, m_slope=-.08, h_diff=10):
    """Calculate efficiency of heat pump

    Parameters
    ----------
    temp_diff: array
        Temperature difference
    efficiency_intersect : float
        Extrapolated intersect at a temp diff of `h_diff` degrees (which is
        treated as the efficiency at that reference difference)
    m_slope : float, default=-0.08
        Temperature dependency of heat pumps (slope) derived from
        Staffell et al. (2012)
    h_diff : float, default=10
        Reference temperature difference at which `efficiency_intersect`
        applies

    Return
    ------
    efficiency_hp : array
        Efficiency of heat pump

    Note
    ----
    Because the efficiency of heat pumps is temperature dependent, the
    efficiency is calculated from the slope and the intersect provided for a
    temperature difference of `h_diff`. The intersect at temp difference 10
    is about 6 for ASHP and about 9 for GSHP.

    Fixes: the original docstring attached ``default=-0.08`` to
    ``efficiency_intersect`` and ``default=10`` to ``m_slope`` (swapped);
    dead commented-out variants of the formula were removed.
    """
    # Linear extrapolation anchored so that eff(h_diff) == efficiency_intersect.
    return m_slope * temp_diff + (efficiency_intersect - (m_slope * h_diff))
def equally_sized_accessor(elements, n_variadic, n_preceding_simple,
                           n_preceding_variadic):
    """
    Returns a starting position and a number of elements per variadic group
    assuming equally-sized groups and the given numbers of preceding groups.
    elements: a sequential container.
    n_variadic: the number of variadic groups in the container.
    n_preceding_simple: the number of non-variadic groups preceding the current
      group.
    n_preceding_variadic: the number of variadic groups preceding the current
      group.
    """
    total_variadic_length = len(elements) - n_variadic + 1
    # Group sizes must divide evenly; the C++-side trait verifier enforces this.
    assert total_variadic_length % n_variadic == 0
    group_size = total_variadic_length // n_variadic
    first_index = n_preceding_simple + n_preceding_variadic * group_size
    return first_index, group_size
def _make_extra_string(s=''):
""" Create an extra function that just returns a constant string.
"""
def extra(attr, die, section_offset):
return s
return extra | 45a583d58214c622e035c297b9fe54289a785d4e | 689,509 |
def previous(field):
    """Generates s-expression to access a `field` previous value.
    """
    sexpr = ["f"]
    sexpr.append(field)
    sexpr.append(-1)
    return sexpr
def is_inside(x, y, window):
    """
    Check if (x, y) is a valid coordinate in input window
    Args:
        x (int): x-coordinate
        y (int): y-coordinate
        window (face_spinner.Window): Window
    Returns:
        bool -> True if valid, False otherwise
    """
    # Bounds are half-open: [x, x + w) and [y, y + h).
    within_x = window.x <= x < window.x + window.w
    within_y = window.y <= y < window.y + window.h
    return within_x and within_y
def is_source_directory(src_count, files_count):
    """
    Return True is this resource is a source directory with at least over 90% of
    source code files at full depth.

    Guards against a zero ``files_count`` (the original raised
    ZeroDivisionError); an empty directory is not a source directory.
    """
    if not files_count:
        return False
    return src_count / files_count >= 0.9
def _prev_char(s: str, idx: int):
"""Returns the character from *s* at the position before *idx*
or None, if *idx* is zero.
"""
if idx <= 0:
return None
else:
return s[idx - 1] | aebcb183ea40f9916e07fa8d976754717829c836 | 689,522 |
from typing import Sequence
from typing import Dict
from typing import Union
def get_instance_class_from_properties_seq(
        instance_idx: Sequence, map_dict: Dict[str, Union[str, int]]) -> Sequence:
    """
    Extract instance classes form mapping dict
    Args:
        instance_idx: instance ids present in segmentaion
        map_dict: dict mapping instance ids (keys) to classes
    Returns:
        Sequence[int]: extracted instance classes, in ascending id order
    """
    # Keys in map_dict are the string form of the integer instance id.
    return [int(map_dict[str(int(idx))]) for idx in sorted(instance_idx)]
def _item_to_value_identity(iterator, item):
"""An item to value transformer that returns the item un-changed."""
# pylint: disable=unused-argument
# We are conforming to the interface defined by Iterator.
return item | 07f15c861583196908a76e40646ad649da5f4922 | 689,528 |
def get_first_line(filename, nlines=1):
    """return the first line of a file.
    Arguments
    ---------
    filename : string
       The name of the file to be opened.
    nlines : int
       Number of lines to return.
    Returns
    -------
    string
       The first line(s) of the file.
    """
    # The 'U' (universal newlines) open mode was deprecated and then removed
    # in Python 3.11; plain text mode already translates newlines by default.
    with open(filename, 'r') as f:
        return "".join(f.readline() for _ in range(nlines))
def _IsBuildRunning(build_data):
"""Checks whether the build is in progress on buildbot.
Presence of currentStep element in build JSON indicates build is in progress.
Args:
build_data: A dictionary with build data, loaded from buildbot JSON API.
Returns:
True if build is in progress, otherwise False.
"""
current_step = build_data.get('currentStep')
if (current_step and current_step.get('isStarted') and
current_step.get('results') is None):
return True
return False | 34b0f46e2fae86cb82a82c3e73c010f4517eb4ac | 689,539 |
import ipaddress
def search_prefix_list(ip, prefix_list):
    """
    Check if IP address exists in some prefix which is part of `prefix_list`
    `prefix_list` must be a sorted list of (ip_prefix_begin, ip_prefix_end)
    tuples in int equivalent form.

    Converts a dotted/colon string IP to its int form, then binary-searches
    for a tuple whose [begin, end] range contains it.

    Returns the matching index, or -1 when no prefix contains the IP.
    """
    if isinstance(ip, str) and ("." in ip or ":" in ip):
        ip = int(ipaddress.ip_address(ip))
    lo, hi = 0, len(prefix_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        begin, end = prefix_list[mid][0], prefix_list[mid][1]
        if begin <= ip <= end:
            return mid
        if ip < begin:
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
def differ_in_at_most_one(first, second):
    """Check if two strings differ in at most one position.

    "One position" means a single substitution when the lengths are equal,
    or a single insertion/deletion when the lengths differ by one.
    Identical strings also return True.
    """
    # Check if length differences make it possible
    if abs(len(first) - len(second)) > 1:
        return False
    if len(first) > len(second):
        longer, shorter = first, second
    else:
        longer, shorter = second, first
    one_found = False
    # Two-pointer scan: l walks the longer string, s the shorter.
    l, s = 0, 0
    long_length = len(longer)
    short_length = len(shorter)
    while l < long_length and s < short_length:
        if longer[l] != shorter[s]:
            if one_found: # found second difference
                return False
            else:
                one_found = True
            # skip one, if we have different lengths
            # position in shorter string should stay in place in that case
            if long_length != short_length:
                l += 1
            else:
                l += 1
                s += 1
        else:
            l += 1
            s += 1
    # Any leftover tail is at most one char (length check above), so it
    # counts as the single allowed difference.
    return True
from pathlib import Path
def is_within(parent, child) -> bool:
    """
    Check that a path is within another.

    True when the resolved *parent* is a strict ancestor of the resolved
    *child* (equal paths yield False).
    """
    parent_path = Path(parent).resolve()
    child_path = Path(child).resolve()
    return parent_path in child_path.parents
import torch
def rankdata_pt(b, tie_method='ordinal', dim=0):
    """
    pytorch equivalent of scipy.stats.rankdata, GPU compatible.
    :param b: torch.Tensor
        The 1-D or 2-D tensor of values to be ranked. The tensor is first flattened
        if tie_method is not 'ordinal'.
    :param tie_method: str, optional
        The method used to assign ranks to tied elements.
        The options are 'average', 'min', 'max', 'dense' and 'ordinal'.
        'average':
            The average of the ranks that would have been assigned to
            all the tied values is assigned to each value.
            Supports 1-D tensors only.
        'min':
            The minimum of the ranks that would have been assigned to all
            the tied values is assigned to each value. (This is also
            referred to as "competition" ranking.)
            Supports 1-D tensors only.
        'max':
            The maximum of the ranks that would have been assigned to all
            the tied values is assigned to each value.
            Supports 1-D tensors only.
        'dense':
            Like 'min', but the rank of the next highest element is assigned
            the rank immediately after those assigned to the tied elements.
            Supports 1-D tensors only.
        'ordinal':
            All values are given a distinct rank, corresponding to the order
            that the values occur in `a`.
            The default is 'ordinal' to match argsort.
    :param dim: int, optional
        The axis of the observation in the data if the input is 2-D.
        The default is 0.
    :return: torch.Tensor
        An array of length equal to the size of `b`, containing rank scores.
    """
    # b = torch.flatten(b)
    if b.dim() > 2:
        raise ValueError('input has more than 2 dimensions')
    if b.dim() < 1:
        raise ValueError('input has less than 1 dimension')
    order = torch.argsort(b, dim=dim)
    if tie_method == 'ordinal':
        # NOTE(review): scipy's 'ordinal' assigns ranks via the inverse
        # permutation (ranks[order[i]] = i + 1); `order + 1` is that inverse
        # only for special inputs -- confirm the intended semantics here.
        ranks = order + 1
    else:
        if b.dim() != 1:
            raise NotImplementedError('tie_method {} not supported for 2-D tensors'.format(tie_method))
        else:
            n = b.size(0)
            ranks = torch.empty(n).to(b.device)
            # dupcount: size of the current run of tied values (excluding the
            # first element of the run); total_tie_count: ties consumed so far,
            # used only by 'dense' to compact the rank sequence.
            dupcount = 0
            total_tie_count = 0
            for i in range(n):
                inext = i + 1
                # A tie run ends at the last element or when the next sorted
                # value differs from the current one.
                if i == n - 1 or b[order[i]] != b[order[inext]]:
                    if tie_method == 'average':
                        tie_rank = inext - 0.5 * dupcount
                    elif tie_method == 'min':
                        tie_rank = inext - dupcount
                    elif tie_method == 'max':
                        tie_rank = inext
                    elif tie_method == 'dense':
                        tie_rank = inext - dupcount - total_tie_count
                        total_tie_count += dupcount
                    else:
                        raise ValueError('not a valid tie_method: {}'.format(tie_method))
                    # Write the chosen rank back to every member of the run.
                    for j in range(i - dupcount, inext):
                        ranks[order[j]] = tie_rank
                    dupcount = 0
                else:
                    dupcount += 1
    return ranks
def get_temp_var(used_vars):
    """Return a temp variable name "t<i>" that is not in *used_vars*.

    The original scanned only t0..t999 and fell off the end (implicitly
    returning None) once all were taken; this version scans an unbounded
    counter so a fresh name is always returned.

    Args:
        used_vars: container of names already in use (membership-tested).

    Returns:
        str: the first unused name of the form "t<i>".
    """
    i = 0
    while True:
        var_name = "t{}".format(i)
        if var_name not in used_vars:
            return var_name
        i += 1
def getCoinbaseAddr (node, blockHash):
    """
    Extract the coinbase tx' payout address for the given block.
    """
    txids = node.getblock (blockHash)['tx']
    assert txids
    txData = node.getrawtransaction (txids[0], True, blockHash)
    vin = txData['vin']
    vout = txData['vout']
    # A coinbase transaction has exactly one input (marked 'coinbase')
    # and at least one output carrying the payout.
    assert len (vout) >= 1 and len (vin) == 1
    assert 'coinbase' in vin[0]
    return vout[0]['scriptPubKey']['address']
import time
def format_ampm(time_24hour) -> str:
"""Convert 24 hour to 12 hour system"""
t = time.strptime(time_24hour, "%H%M") # Create time object
timevalue_12hour = time.strftime("%-I:%M %p", t) # e.g. From 08:14 to 8:14 AM or 15:24 to 3:24 PM
return timevalue_12hour | 2e4852ba060df38b59f704610098c665d606d320 | 689,561 |
def all_filled(board):
    """
    Returns True if all board is filled, False otherwise.
    """
    # A board is filled when no row contains a None cell.
    return all(None not in row for row in board)
import struct
def decode_ieee(val_int):
    """Decode Python int (32 bits integer) as an IEEE single precision format
    Support NaN.
    :param val_int: a 32 bit integer as an int Python value
    :type val_int: int
    :returns: float result
    :rtype: float
    """
    # Reinterpret the 32-bit pattern as a single-precision float.
    packed = struct.pack("I", val_int)
    (value,) = struct.unpack("f", packed)
    return value
import re
def typeset_chemical(s: str) -> str:
    """
    Typesets chemical formulas using Latex.
    Parameters
    ----------
    s : str
        Input string
    Returns
    -------
    str
        Output string with chemical formulas typeset correctly
    """
    parts = []
    # Fence every "X_d" occurrence (capital letter, underscore, digit) with
    # '|' so the split below isolates each chemical-subscript token.
    for n in re.sub(r'[A-Z]_\d', r'|\g<0>|', s).split('|'):
        if re.match(r'[A-Z]_\d', n):
            # Close the running \text{...} group before the subscript and
            # reopen it afterwards: "O_2" -> "O", "}", "_2", "\text{".
            parts.extend([f'{n[:-2]}', '}', f'_{n[-1]}', '\\text{'])
        else:
            parts.append(n)
    # Wrap everything in \text{ and drop the empty fragments from the split.
    parts = ['\\text{'] + [n for n in parts if n]
    if parts[-1] == '\\text{':
        # Avoid a dangling opener when the string ends with a subscript.
        parts = parts[:-1]
    ret = ''.join(parts)
    if ret.count('{') == ret.count('}') + 1:
        # Balance the braces for the still-open \text{ group.
        ret += '}'
    return ret
import torch
def log1p_exp(input_tensor):
    """ Computationally stable function for computing log(1+exp(x)).
    """
    # Standard softplus trick: max(x, 0) + log1p(exp(-|x|)) avoids overflow
    # for large positive x and underflow for large negative x.
    positive_part = input_tensor * input_tensor.ge(0).to(torch.float32)
    return positive_part + torch.log1p(torch.exp(-input_tensor.abs()))
from typing import OrderedDict
def tuples_as_dict(_list):
    """Translate a list of tuples to OrderedDict with key and val as strings.
    Parameters
    ----------
    _list : list of tuples
    Returns
    -------
    collections.OrderedDict
    Example
    -------
    ::
        >>> tuples_as_dict([('cmd', 'val'), ('cmd2', 'val2')])
        OrderedDict([('cmd', 'val'), ('cmd2', 'val2')])
    """
    return OrderedDict((str(key), str(val)) for key, val in _list)
def find_adjacent_segment_type(segments, time):
    """Find boundary type on left and right (NONSPEECH or SPEECH)

    Mutates *segments* in place, appending "NS" (nonspeech gap) or "SC"
    (speaker change) once for the left boundary and once for the right
    boundary of each segment, then returns the same list.

    Assumes each segment is a mutable sequence with the start time at
    index 0 and the end time at index 2 -- TODO confirm with caller.
    """
    # find previous segment type
    segments[0].append("NS")
    prev = segments[0]
    for i in range(1, len(segments)):
        # Gap larger than `time` between this start and the previous end
        # counts as nonspeech; otherwise it is a speaker change.
        if (segments[i][0] - time) > prev[2]:
            segments[i].append("NS") # nonspeech
        else:
            segments[i].append("SC") # speaker change
        prev = segments[i]
    # find following segment type
    for i in range(0, len(segments) - 1):
        if (segments[i][2] + time) < segments[i + 1][0]:
            segments[i].append("NS") # nonspeech
        else:
            segments[i].append("SC") # speaker change
    segments[len(segments) - 1].append("NS")
    return segments
import math
def get_elapsed_time(start, end):
    """
    Compute elapsed time.
    @param start: start time
    @param end: end time
    @return: elapsed time (string), e.g. "2 min 5.0000 sec"

    Fixes: the result variable was initialised as a list (`s_time = []`)
    before being reassigned a string, and large commented-out print blocks
    were removed.
    """
    diff = end - start
    days = hours = minutes = 0
    if diff > 86400:  # day
        days = math.floor(diff / 86400)
        diff = diff - days * 86400
    if diff > 3600:  # hour
        hours = math.floor(diff / 3600)
        diff = diff - hours * 3600
    if diff > 60:  # minute
        minutes = math.floor(diff / 60)
        diff = diff - minutes * 60
    if days > 0:
        return "{0} days {1} hrs {2} min {3:.4f} sec".format(days, hours, minutes, diff)
    if hours > 0:
        return "{0} hrs {1} min {2:.4f} sec".format(hours, minutes, diff)
    if minutes > 0:
        return "{0} min {1:.4f} sec".format(minutes, diff)
    return "{0:.4f} sec".format(diff)
import asyncio
def async_test(test):
    """
    Decorator to run async test methods.
    """
    def wrapper(*args, **kwargs):
        # Drive the coroutine to completion on a fresh event loop;
        # the coroutine's return value is intentionally discarded.
        coro = test(*args, **kwargs)
        asyncio.run(coro)
    return wrapper
import itertools
def flatten(sequence_list, cls=list):
    """
    Flatten one level of nesting
    :param sequence_list: list of sequence
    :param cls: create instance of cls by flatten_gen; a falsy cls returns
        the lazy generator itself
    :return: cls instance or generator
    """
    flat = itertools.chain.from_iterable(sequence_list)
    if cls:
        return cls(flat)
    return flat
def calculate_precision_recall_f1score(y_pred, y_true, entity_label=None):
    """ Calculates precision recall and F1-score metrics.
    Args:
        y_pred (list(AnnotatedDocument)): The predictions of an NER
            model in the form of a list of annotated documents.
        y_true (list(AnnotatedDocument)): The ground truth set of
            annotated documents.
        entity_label (str, optional): The label of the entity for which
            the scores are calculated. It defaults to None, which means
            all annotated entities.
    Returns:
        (3-tuple(float)): (Precision, Recall, F1-score)
    """
    # Flatten all annotations
    all_y_pred_ann = []
    all_y_true_ann = []
    if entity_label is None:
        for annotated_document in y_pred:
            all_y_pred_ann.extend(annotated_document.annotations)
        for annotated_document in y_true:
            all_y_true_ann.extend(annotated_document.annotations)
    else:
        # Keep only the annotations matching the requested entity label.
        for annotated_document in y_pred:
            all_y_pred_ann.extend([
                annotation for annotation in annotated_document.annotations
                if annotation.label == entity_label
            ])
        for annotated_document in y_true:
            all_y_true_ann.extend([
                annotation for annotation in annotated_document.annotations
                if annotation.label == entity_label
            ])
    tp = 0.0
    fp = 0.0
    fn = 0.0
    # Convert true annotations to a set in O(n) for quick lookup
    # (assumes annotation objects are hashable and compare by value --
    # TODO confirm against the AnnotatedDocument implementation).
    all_y_true_ann_lookup = set(all_y_true_ann)
    # True positives are predicted annotations that are confirmed by
    # their existence in the ground truth dataset. False positives are
    # predicted annotations that are not in the ground truth dataset.
    for annotation in all_y_pred_ann:
        if annotation in all_y_true_ann_lookup:
            tp += 1.0
        else:
            fp += 1.0
    # Convert predictions to a set in O(n) for quick lookup
    all_y_pred_ann_lookup = set(all_y_pred_ann)
    # False negatives are annotations in the ground truth dataset that
    # were never predicted by the system.
    for annotation in all_y_true_ann:
        if annotation not in all_y_pred_ann_lookup:
            fn += 1.0
    # Guard every ratio against a zero denominator.
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.
    f1_score = (2 * precision * recall) / (precision + recall) if\
        (precision + recall) > 0 else 0.
    return (precision, recall, f1_score)
def get_func_and_args_from_str(call_str):
    """Parse call string to get function and argument names.
    Args:
        call_str: Call string must be in the form:
            `tf.foo(arg1=val1, arg2=val2, ...)`.
    Returns:
        (function_name, list of arg names) tuple.
    """
    open_paren = call_str.find("(")
    close_paren = call_str.rfind(")")
    function_name = call_str[:open_paren]
    arg_names = []
    for chunk in call_str[open_paren + 1:close_paren].split(","):
        # Keep only the name to the left of '=' and drop empty fragments.
        name = chunk.split("=")[0].strip()
        if name:
            arg_names.append(name)
    return function_name, arg_names
import json
def read_from_json(full_path_with_name):
    """Read from an arbitrary JSON and return the structure"""
    with open(full_path_with_name, 'r') as handle:
        parsed = json.load(handle)
    return parsed
def xml_tag_name(tag_name: str) -> str:
    """Cleans anonymous tag-names for serialization, so that the colon does not
    lead to invalid XML::
        >>> xml_tag_name(':Series')
        'ANONYMOUS_Series__'
    :param tag_name: the original tag name
    :returns: the XML-conform tag_name
    """
    if tag_name.startswith(':'):
        return 'ANONYMOUS_%s__' % tag_name[1:]
    return tag_name
import uuid
def default_test_msg(prefix='', suffix='', sep=' '):
    """Create a random test string: prefix, a uuid4 hex token and suffix
    joined by *sep*."""
    token = uuid.uuid4().hex
    return prefix + sep + token + sep + suffix
import re
def polished(summary):
    """ Polish summary, e.g. by cutting and trimming it.
    Args:
        summary: summary text to polish
    Returns:
        (part of) polished summary: the first sentence (up to and including
        the first period), stripped; or the whole stripped summary when it
        contains no period.
    """
    # Non-greedy quantifier so the match stops at the FIRST period; the
    # previous greedy pattern '.*\.' matched up to the LAST period on the
    # line, returning several sentences instead of one.
    first_sentence = re.search(r'.*?\.', summary)
    if first_sentence:
        return first_sentence.group().strip()
    return summary.strip()
def sample_exposure(image, sample_indices):
    """
    A helper function which samples the given image at the specified indices.
    :param image: a single RGB image to be sampled from
    :param sample_indices: an array of the length N with the indices to sample at. N is the number of pixels
    :return: sampled_red is an array of the length N with the sample from the red channel
             sampled_green is an array of the length N with the sample from the green channel
             sampled_blue is an array of the length N with the sample from the blue channel
    """
    # Flatten each channel, then pick the requested indices from each.
    flat_channels = (image[:, :, channel].flatten() for channel in range(3))
    sampled = [[chan[index] for index in sample_indices] for chan in flat_channels]
    return sampled[0], sampled[1], sampled[2]
def triangular(n):
    """Gives the n-th triangle number n*(n+1)/2.

    Uses floor division so an integer input yields an int; the previous
    true division always returned a float under Python 3 (the product of
    consecutive integers is always even, so no precision is lost).
    """
    return n * (n + 1) // 2
def _get_command_prefix(properties):
"""
If multiple commands are registered with the same name, attempt to construct a unique
prefix from other information in the command's properties dictionary to distinguish one
command from another. Uses the properties' ``app`` and/or ``group`` keys to create the
prefix.
:param dict properties: Arbitrary key/value information related to a registered command.
:returns: A unique identifier for the command as a str.
"""
prefix_parts = []
if properties.get("app"):
# First, distinguish commands by app name.
prefix_parts.append(properties["app"].instance_name)
if properties.get("group"):
# Second, distinguish commands by group name.
prefix_parts.append(properties["group"])
return ":".join(prefix_parts) | 881765a59fdf34c6789f341cb8d60ca5dd6a2725 | 689,600 |
from typing import List
def extract_words(input_file_name: str) -> List[str]:
    """
    Extracts a list of words from a word list. Expects one word per line.
    :param input_file_name: the path of the file to extract the words from
    :return: a list of words (blank lines are skipped)
    """
    with open(input_file_name, "r", encoding='utf-8') as input_file:
        stripped = (line.strip() for line in input_file)
        return [token for token in stripped if token]
def checkbox_result_to_bool(res):
    """
    Takes in a checkbox result from a form and converts it to a bool
    Params:
        res (str): the result string from a checkbox
    Returns:
        bool: the boolean value of res, or None for unrecognised input
    """
    if res is None or res == "off":
        return False
    if res == "on":
        return True
    # Anything else is an unrecognised checkbox value.
    return None
def EQUAL(x, y):
    """checks if both given arguments are equal"""
    result = (x == y)
    return result
def alphaB(T):
    """
    Returns Case B recombination coefficient (Osterbrock 1989; Krumholz+07)
    2.59e-13*(T/1e4)**(-0.7)
    """
    scaled_temperature = T / 1e4
    return 2.59e-13 * scaled_temperature ** (-0.7)
def first_non_repeating_letter(the_string):
    """
    Find first non-repeating letter in a string.
    Letters are to be treated case-insensitive,
    which means 't' = 'T'. However, one must
    return the first non-repeating letter as it
    appears in the string, either in uppercase
    or lowercase.
    'sTress' -> 'T'
    :param the_string: str, letters of alphabet
    :return: str, single letter or ''
    """
    # Compare case-insensitively by scanning the lowercased string; a
    # character occurring exactly once there is non-repeating.
    lowered = the_string.lower()
    for index, ch in enumerate(lowered):
        if lowered.count(ch) == 1:
            # Return the character from the original string so the
            # caller sees its original case.
            return the_string[index]
    return ''
def weak_pareto_dominates(vec1, vec2):
    """
    Return True iff vec1 weakly Pareto-dominates vec2, i.e. no component
    of vec1 is strictly smaller than the corresponding component of vec2.
    """
    # Index over vec1 (not zip) so a shorter vec2 still raises IndexError,
    # matching the original behaviour.
    return all(vec1[i] >= vec2[i] for i in range(len(vec1)))
def HtmlString_to_HtmlFile(html_string, file_name="test.html"):
    """Save an HTML string to a file and return the file name.

    Args:
        html_string (str): the HTML document content to write.
        file_name (str): destination path (defaults to "test.html").
    Returns:
        str: the name of the file that was written.
    """
    # A context manager guarantees the file handle is closed even if the
    # write raises (the previous open/write/close left it open on error).
    with open(file_name, 'w') as out_file:
        out_file.write(html_string)
    return file_name
import requests
import tempfile
def download_file(url):
    """
    Download a PDF/PPT/PPTX from *url* into a named temporary file.

    The file type is inferred from the Content-Type header of the
    HTTP response.

    :param url: the URL to download from
    :return: an open tempfile.NamedTemporaryFile positioned at the start
        of the downloaded content (deleted when closed)
    :raises ValueError: if the Content-Type is not a supported document type
    """
    print("url detected, downloading...")
    response = requests.get(url)
    # detect file type from MIME-TYPE of request
    content_type = response.headers['content-type']
    extension_by_mime = {
        'application/pdf': ".pdf",
        "application/vnd.ms-powerpoint": ".ppt",
        "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
    }
    file_type = extension_by_mime.get(content_type)
    if file_type is None:
        print("couldn't figure out type of downloaded file. aborting")
        # The original used a bare `raise` here, which fails with
        # "No active exception to re-raise"; raise a descriptive error.
        raise ValueError("unsupported content type: {0}".format(content_type))
    print("downloaded {0}".format(file_type))
    # write to temporary file; flush and rewind so callers can read it back
    temp = tempfile.NamedTemporaryFile(suffix=file_type)
    temp.write(response.content)
    temp.flush()
    temp.seek(0)
    return temp
def comma_list_to_shape(s):
    """Parse a string of comma-separated ints into a valid numpy shape.

    Trailing commas will raise an error (the empty trailing field is not
    a valid integer).

    Parameters
    ----------
    s : str
        A string of comma-separated positive integers.

    Returns
    -------
    tuple

    Raises
    ------
    TypeError
        If s is not a string.
    ValueError
        If s is empty, contains non-integers, or any axis is < 1.
    """
    if not isinstance(s, str):
        raise TypeError("s must be a string")
    if s == "":
        raise ValueError("s is empty")
    # int() raises ValueError for any non-integer field (incl. "").
    shape = tuple(int(field) for field in s.split(","))
    for i, extent in enumerate(shape):
        if extent < 1:
            raise ValueError(f"axis {i} of shape {shape} must be positive")
    return shape
def has_juniper_error(s):
    """Test whether a string seems to contain an Juniper error."""
    error_markers = (
        'unknown command.',
        'syntax error, ',
        'invalid value.',
        'missing argument.',
    )
    return any(marker in s for marker in error_markers)
def factorial(num):
    """
    (int) -> int
    Compute the factorial of a non-negative integer.

    Implemented iteratively so large inputs do not hit Python's
    recursion limit (the previous version recursed once per unit).

    >>> factorial(3)
    6
    >>> factorial(4)
    24

    :param num: int the number to evaluate
    :return: int the factorial of num
    :raises ValueError: if num is negative
    """
    if num < 0:
        # Error message kept identical to the original (Spanish).
        raise ValueError(f'no existe el factorial '
                         f'para {num}')
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
def condition(cond, rule):
    """ Only apply rule if condition is true """
    def guarded(expr):
        # Pass the expression through untouched when the predicate fails.
        return rule(expr) if cond(expr) else expr
    return guarded
import json
def to_json(obj):
    """
    Serialize the given object to its JSON string representation.

    :param obj: The input object
    :type obj: object
    :return: object as json string
    :rtype: str
    """
    json_string = json.dumps(obj)
    return json_string
from datetime import datetime
def set_date_from_string(value: str, format_: str = "%Y-%m-%dT%H:%M:%S"):
    """Parse a string into a datetime, trying several common formats.

    Args:
        value: The value to be parsed (str, datetime, or None).
        format_: The preferred strptime format, tried first.
    Returns:
        A datetime object, or None when value is None.
    Raises:
        ValueError: If value matches none of the candidate formats.
    """
    if value is None:
        return None
    if isinstance(value, datetime):
        return value
    # Drop a trailing "Z" (UTC designator), which strptime cannot parse.
    if value[-1] == "Z":
        value = value[:-1]
    candidate_formats = (format_, "%Y-%m-%d", "%Y-%m-%dT%H:%M:%S",
                         "%Y-%m-%dT%H:%M:%S.%f")
    for candidate in candidate_formats:
        try:
            return datetime.strptime(value, candidate)
        except ValueError:
            continue
    raise ValueError(f"Wrong date string format. The format {format_} "
                     f"should be followed. {str(value)} passed")
def ChkCTStarOnly(cron_time_field):
    """Checks if a crontab field is only a *.

    Args:
      cron_time_field: Parsed cron time field to check.
    Returns:
      True if the field is empty/None or consists of a single 'star' entry.
    """
    if not cron_time_field:
        return True
    # A lone entry whose Kind is 'star' is the only other True case.
    return len(cron_time_field) == 1 and cron_time_field[0].Kind == 'star'
def _stripBold(s):
"""Returns the string s, with bold removed."""
return s.replace('\x02', '') | da94e26846091ac3c4a501b86cd00a18bf06b1c1 | 689,640 |
def capitalize_title(title):
    """Convert the first letter of each word in the title to uppercase.

    :param title: str - title string that needs title casing.
    :return: str - title string in title case (first letters capitalized).
    """
    titled = title.title()
    return titled
def perspectiveTransform(x, y, M):
    """ Implementation of the perspective transform (homography) in 2D.

    **Parameters**\n
    x, y: numeric, numeric
        Pixel coordinates of the original point.
    M: 2d array
        Perspective transform matrix.

    **Return**\n
    xtrans, ytrans: numeric, numeric
        Pixel coordinates after projective/perspective transform.
    """
    # Apply each row of M to the homogeneous point (x, y, 1), then divide
    # by the homogeneous coordinate to get back to pixel space.
    hom = [M[row, 0] * x + M[row, 1] * y + M[row, 2] for row in range(3)]
    return hom[0] / hom[2], hom[1] / hom[2]
def decode_time(value):
    """time decoder

    Used for fields such as:
        duration=1234.123s

    The literal string "never" is passed through unchanged.
    """
    if value == "never":
        return value
    # Strip the trailing seconds unit and parse the remainder as a float.
    return float(value.rstrip("s"))
def InstanceOverlap_OLD(instance1, instance2):
    """Returns True if given instances share a vertex."""
    # Short-circuits on the first shared vertex, like the original loop.
    return any(vertex in instance2.vertices for vertex in instance1.vertices)
import numbers
import string
def build_coder(shift):
    """
    Returns a dict that can apply a Caesar cipher to a letter.

    The cipher is defined by the shift value. Ignores non-letter characters
    like punctuation and numbers. The empty space counts as the 27th letter
    of the alphabet, so spaces are mapped to a lowercase letter as
    appropriate (the lowercase mapping for ' ' overwrites the uppercase one).

    shift: 0 <= int < 27
    returns: dict

    Example (shift=3): 'a' -> 'd', 'A' -> 'D', ' ' -> 'c', 'x' -> ' '.
    """
    assert shift >= 0 and shift < 27, 'shift %s is not between 0 and 27' % shift
    # numbers.Integral used in case of long integers
    assert isinstance(shift, numbers.Integral), 'shift is not an integer'
    coder = {}
    # Uppercase alphabet first so the final mapping for ' ' comes from the
    # lowercase alphabet (second pass overwrites the shared space key).
    for alphabet in (string.ascii_uppercase + ' ', string.ascii_lowercase + ' '):
        rotated = alphabet[shift:] + alphabet[:shift]
        coder.update(zip(alphabet, rotated))
    return coder
def apply_ucrow_aggregation(X):
    """
    Given a tensor of activations, aggregate by sum-pooling without weighting.

    :param ndarray X:
        3d tensor of activations with dimensions (channels, height, width)
    :returns ndarray:
        unweighted global image feature (one value per channel)
    """
    # Sum over the spatial axes (height, width), leaving the channel axis.
    pooled = X.sum(axis=(1, 2))
    return pooled
def create_groupings_columns(groupings_params):
    """
    Strip all other parameters from groupings except name and columns.

    :param groupings_params: dict mapping grouping name -> dict of
        grouping parameters (possibly containing a 'columns' entry)
    :return: dict mapping grouping name -> its 'columns' value; groupings
        without a 'columns' entry are omitted
    """
    new_groupings = {}
    for grouping_name, grouping_values in groupings_params.items():
        # The original compared with `values_name is 'columns'`, an identity
        # check on a string literal that only works via CPython interning;
        # a plain key lookup is both correct and clearer.
        if 'columns' in grouping_values:
            new_groupings[grouping_name] = grouping_values['columns']
    return new_groupings
def _container_exists(blob_service_client, container):
"""Check if container exists"""
return next(blob_service_client.list_containers(container), None) is not None | d59f7e517876f093bbba8a580dd3fe31895075e2 | 689,664 |
import re
def parse_doi(doi: str) -> str:
    """Parses a DOI from e.g. a URL.

    Args:
        doi: DOI string.
    Returns:
        The (possibly trimmed) DOI.
    Raises:
        ValueError: if the DOI cannot be parsed.
    """
    # DOI syntax: https://www.doi.org/doi_handbook/2_Numbering.html#2.2.
    found = re.search(r'(10\.[\d.]+\/[a-zA-Z\d.]+)', doi)
    if found is None:
        raise ValueError(f'could not parse DOI: {doi}')
    return found.group(1)
import itertools
def four_body_sum(spins):
    """Calculate four body term in the periodic lattice.

    Input:
        spins: spins configuration matrix.
    Output:
        sum of four body terms.
    """
    size = len(spins)
    total = 0
    # Take only the right and top neighbours for each site so every
    # plaquette is counted exactly once (periodic boundary conditions).
    for i in range(size):
        for j in range(size):
            total += (spins[i, j] * spins[i, (j + 1) % size]
                      * spins[(i + 1) % size, j]
                      * spins[(i + 1) % size, (j + 1) % size])
    return total
from hashlib import md5
def part2_adventcoin_miner(secret_key, match='000000'):
    """
    --- Part Two ---
    Find the lowest non-negative integer that, appended to *secret_key*,
    yields an MD5 hex digest starting with *match* (six zeroes by default).

    :param secret_key: str key the candidate number is appended to
    :param match: str required hex-digest prefix
    :return: int first number producing a matching digest, or None when no
        match exists within the search range
    """
    for number in range(99999999):
        # md5() requires bytes; the previous version passed a str, which
        # raises TypeError on Python 3, so encode the candidate first.
        digest = md5((secret_key + str(number)).encode()).hexdigest()
        if digest.startswith(match):
            return number
def duration(duration_ms: float) -> str:
    """
    Formats duration into a string logged in stats.

    :param duration_ms: Duration in milliseconds
    :return: formatted duration, e.g. "12.50ms"
    """
    return f"{duration_ms:.2f}ms"
def compare_probs(post_a, post_b):
    """Compute P(A > B): the fraction of samples where post_a exceeds post_b."""
    wins = (post_a > post_b).sum()
    return wins / post_a.size
from typing import Dict
def getCosponsors(fileDict: Dict, includeFields=None) -> list:
    """
    Gets Cosponsors from data.json Dict. `includeFields` is a list of keys
    to keep. The most useful are probably 'name' and 'bioguide_id'.

    Args:
        fileDict (Dict): the Dict created from data.json
        includeFields (list): the fields in the cosponsor object to keep.
            If no 'includeFields' list is provided, all fields are preserved.
            (Default changed from a mutable `[]` to None — behaviour is
            identical since the argument is only truth-tested.)

    Returns:
        list: a list of cosponsors, with selected fields determined by
        includeFields; absent fields come back as None.
    """
    cosponsors = fileDict.get('cosponsors', [])
    if includeFields:
        # Keep only the requested fields for each cosponsor.
        cosponsors = [{field: cosponsor.get(field) for field in includeFields}
                      for cosponsor in cosponsors]
    return cosponsors
def ternary_search(f, xmin, xmax, epsilon=1e-6):
    """Ternary search for the minimiser of a unimodal (convex-down) function.

    Args:
        f: An objective function. Must be convex downward.
        xmin: The lower bound of the range to search.
        xmax: The upper bound of the range to search.
        epsilon: The epsilon value for judging convergence.
    """
    lo, hi = xmin, xmax
    while hi - lo > epsilon:
        third = (hi - lo) / 3.0
        probe_left = lo + third
        probe_right = lo + 2.0 * third
        # Discard the third of the interval that cannot contain the minimum.
        if f(probe_left) < f(probe_right):
            hi = probe_right
        else:
            lo = probe_left
    return hi
def build_response_body(sentiment_prediction, confidence):
    """
    Returns a formatted dict containing sentiment prediction.

    :param sentiment_prediction: predicted sentiment (stringified in result)
    :param confidence: confidence score, stored as-is
    :return: dict with 'sentiment' and 'confidence' keys
    """
    return {
        'sentiment': f'{sentiment_prediction}',
        'confidence': confidence,
    }
def is_mapping_table(table_id):
    """
    Return True if specified table is a mapping table.

    :param table_id: identifies the table
    :return: True if specified table is an mapping table, False otherwise
    """
    mapping_prefix = '_mapping_'
    return table_id.startswith(mapping_prefix)
def fsplit(pred, objs):
    """Split a list into two classes according to the predicate."""
    matching, rest = [], []
    for obj in objs:
        # Evaluate the predicate exactly once per object, then route the
        # object to the appropriate bucket.
        (matching if pred(obj) else rest).append(obj)
    return (matching, rest)
def _attr_key(attr):
"""Return an appropriate key for an attribute for sorting
Attributes have a namespace that can be either ``None`` or a string. We
can't compare the two because they're different types, so we convert
``None`` to an empty string first.
"""
return (attr[0][0] or ''), attr[0][1] | 6f29da6f0906a9403150667b93f24f5b37386a1f | 689,717 |
import bz2
def unpack(requests):
    """Unpack a list of requests compressed in bz2."""
    decompressed = []
    for request in requests:
        decompressed.append(bz2.decompress(request))
    return decompressed
import torch
def my_l1(x, x_recon):
    """Calculate the L1 (mean absolute error) loss.

    Parameters
    ----------
    x : torch.cuda.FloatTensor or torch.FloatTensor
        Input data
    x_recon : torch.cuda.FloatTensor or torch.FloatTensor
        Reconstructed input

    Returns
    -------
    torch.cuda.FloatTensor or torch.FloatTensor
        Scalar mean of |x - x_recon|.
    """
    absolute_error = torch.abs(x - x_recon)
    return torch.mean(absolute_error)
def step_lstm(lstm, input_, h_0_c_0=None):
    """LSTMCell-like API for LSTM: run a single time step.

    Args:
        lstm: nn.LSTM
        input_: [batch_size, input_size]
        h_0_c_0: None or
            h_0: [num_layers, batch_size, hidden_size]
            c_0: [num_layers, batch_size, hidden_size]
    Returns:
        output: [batch_size, hidden_size]
        h_1_c_1:
            h_1: [num_layers, batch_size, hidden_size]
            c_1: [num_layers, batch_size, hidden_size]
    """
    # Add a fake sequence dimension of length 1, step the LSTM, then drop it.
    stepped, h_1_c_1 = lstm(input_.unsqueeze(0), h_0_c_0)
    return stepped.squeeze(0), h_1_c_1
import string
def title_to_url(title):
    """
    Converts a title string to a valid string for a url. White space will be
    replaced by dash, case will be set to lower and all punctuation marks will
    be removed.

    :param title: the title string.
    :type title: str
    :return: a valid url string.
    :rtype: str
    """
    # Lowercase, drop every punctuation character, then dash-join the words.
    without_punctuation = ''.join(
        ch for ch in title.lower() if ch not in string.punctuation)
    return without_punctuation.replace(' ', '-')
def create_prediction_series(lis, predictor):
    """
    Build a list p of len(lis) where p[i] is the predictor's prediction of
    the i'th element given the first i elements; p[0] equals lis[0].

    :param lis: the sequence to predict over
    :param predictor: object exposing predict(prefix)
    :return: the prediction list created
    """
    predictions = [lis[0]]
    predictions.extend(predictor.predict(lis[:i]) for i in range(1, len(lis)))
    return predictions
def usable_class_name(node):
    """Make a reasonable class name for a class node."""
    qualified = node.qname()
    # Strip builtin-module qualifiers and a bare leading dot, in order.
    for known_prefix in ("__builtin__.", "builtins.", "."):
        if qualified.startswith(known_prefix):
            qualified = qualified[len(known_prefix):]
    return qualified
def is_pass_transistor(pip_json):
    """ Returns boolean if pip JSON indicates pip is a pass transistor.

    Always returns False if database lacks this information.
    """
    # Guard clause: missing key means the database has no information.
    if 'is_pass_transistor' not in pip_json:
        return False
    return bool(int(pip_json['is_pass_transistor']))
def task10(number: int) -> None:
    """
    Print "It is an even number" when the given integer is even,
    otherwise print "It is an odd number".

    Input: number
    Output: None
    """
    parity_message = ("It is an even number" if number % 2 == 0
                      else "It is an odd number")
    print(parity_message)
    return None
def has_method(obj, method_name: str) -> bool:
    """
    Returns True if the provided object (`obj`) has the named method
    (`method_name`), i.e. a callable attribute of that name.
    """
    candidate = getattr(obj, method_name, None)
    return callable(candidate)
def make_naive(value, timezone):
    """
    Makes an aware datetime.datetime naive in a given time zone.
    """
    # Naive input handling is delegated to astimezone() (historically it
    # raised ValueError for naive datetimes), so no redundant check here.
    converted = value.astimezone(timezone)
    # pytz time zones expose normalize() to fix the offset after conversion.
    normalize = getattr(timezone, 'normalize', None)
    if normalize is not None:
        converted = normalize(converted)
    return converted.replace(tzinfo=None)
def ensure_other_is_scalar(matrix_method):
    """Simple decorator to check if second argument to a matrix method is a scalar."""
    scalar_types = (int, float, complex)

    def wrapper(self, other, *args, **kwargs):
        if isinstance(other, scalar_types):
            return matrix_method(self, other, *args, **kwargs)
        raise ValueError(f"Cannot use {matrix_method} with 'other' of type {type(other)}.")
    return wrapper
def switch_team(team):
    """
    Returns the opponent team given the specified team.
    """
    # .get() returns None for anything other than 'home'/'road', matching
    # the original if/elif fall-through behaviour.
    return {'home': 'road', 'road': 'home'}.get(team)
def get_extension(local_file: str) -> str:
    """Extract the file extension of a file (lower-cased, without the dot).

    Note: a name containing no '.' raises IndexError, as before.
    """
    _, extension = local_file.rsplit(".", 1)
    return extension.lower()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.