content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_vehicle_attrs_url(props):
    """Return vehicle properties from *props* as URL parameters for Routino.

    Each key/value pair is rendered as ``key=value;`` and concatenated.
    """
    return ''.join('{key}={val};'.format(key=key, val=value)
                   for key, value in props.items())
def S_IFMT(mode):
    """Return the portion of the file's mode that describes the file type."""
    # Keep only the file-type bits of st_mode (top four octal digits' mask).
    return mode & 0o170000
def get_cls_import_path(cls):
    """Return the dotted import path of class *cls*.

    Builtins (classes living in the same module as ``str``) are returned
    without a module prefix.
    """
    module_name = cls.__module__
    if module_name is None or module_name == str.__module__:
        return cls.__name__
    return '{}.{}'.format(module_name, cls.__name__)
def notas(*n, situacao=False):
    """Analyse the grades and situation of several students.

    :param n: grade values to analyse (accepts any number of values)
    :param situacao: if True, classify the average as RUIM/RAZOAVEL/BOA
    :return: a dictionary with total, maior, menor, media and
        optionally situacao.
    """
    media = sum(n) / len(n)
    resultado = {
        'total': len(n),
        'maior': max(n),
        'menor': min(n),
        'media': media,
    }
    if situacao:
        if media <= 5:
            resultado['situacao'] = 'RUIM'
        elif media <= 7:
            resultado['situacao'] = 'RAZOAVEL'
        else:
            resultado['situacao'] = 'BOA'
    return resultado
import bisect
def clip_around_detections_instant(detections, time):
    """Return the narrowest sublist of detections (a list of Detections)
    containing time. If there is a detection at exactly time, try to
    expand in either direction.
    """
    timestamps = [det.timestamp for det in detections]
    lo = bisect.bisect_left(timestamps, time) - 1
    if lo < 0:
        lo = 0
    hi = bisect.bisect_right(timestamps, time) + 1
    if hi > len(timestamps):
        hi = len(timestamps)
    return detections[lo:hi]
def calculate_interval(timeout_int):
    """Return a polling interval appropriate for the given timeout.

    Some customers require long timeouts, and polling every 0.1s produces
    very long logs -- so poll less often when the timeout is large.
    """
    if timeout_int > 60:
        return 3
    if timeout_int > 10:
        return 1
    return 0.1
def get_attribute(s, ob):
    """
    Break apart a dotted string `s` and recursively fetch attributes from object `ob`.

    For example ``get_attribute('a.b.c', ob)`` returns ``ob.a.b.c``.
    The original implementation used ``str.partition('.')``, which splits at
    only the first dot, so paths with more than one dot failed; ``split('.')``
    walks every segment.
    """
    f = ob
    for part in s.split('.'):
        if not part:
            # Tolerate empty segments (e.g. leading/trailing dots).
            continue
        f = getattr(f, part)
    return f
def desi_proc_command(prow, queue=None):
    """
    Wrapper script that takes a processing table row (or dictionary with NIGHT, EXPID, OBSTYPE, JOBDESC, PROCCAMWORD defined)
    and determines the proper command line call to process the data defined by the input row/dict.

    Args:
        prow, Table.Row or dict. Must include keyword accessible definitions for 'NIGHT', 'EXPID', 'JOBDESC', and 'PROCCAMWORD'.
        queue, str. The name of the NERSC Slurm queue to submit to. Default is None (which leaves it to the desi_proc default).

    Returns:
        cmd, str. The proper command to be submitted to desi_proc to process the job defined by the prow values.
    """
    parts = ['desi_proc', '--batch', '--nosubmit', '--traceshift']
    if queue is not None:
        parts.append(f'-q {queue}')
    obstype = prow['OBSTYPE'].lower()
    if obstype == 'science':
        # Science exposures are split into pre- and post-stdstar jobs.
        if prow['JOBDESC'] == 'prestdstar':
            parts.append('--nostdstarfit --nofluxcalib')
        elif prow['JOBDESC'] == 'poststdstar':
            parts.append('--noprestdstarfit --nostdstarfit')
    elif obstype == 'dark':
        parts.append('--nightlybias')
    pcamw = str(prow['PROCCAMWORD'])
    parts.append('--cameras={} -n {} -e {}'.format(pcamw, prow['NIGHT'], prow['EXPID'][0]))
    if prow['BADAMPS'] != '':
        parts.append('--badamps={}'.format(prow['BADAMPS']))
    return ' '.join(parts)
import random
def random_string(strlen):
    """Our little helper function for creating a random string."""
    alphabet = 'ABCDEFGHIJKLMNPQRSTUVWXYZ0123456789'  # note: no letter 'O' (OH)
    return ''.join(random.choice(alphabet) for _ in range(strlen))
def get_gdeploy_cmd(gdeploy_file):
    """
    Return process args list for gdeploy run.
    """
    return ['gdeploy', '-c', gdeploy_file]
def get_review_status(context):
    """Gets the review status of an award for the FSJ user in *context*."""
    reviewer = context['FSJ_user']
    return context['award'].get_review_status(reviewer)
def sign(x):
    """Returns the sign of `x` as `-1`, `0`, or `+1`."""
    if x == 0:
        return 0
    return 1 if x > 0 else -1
def number_of_lines(filename=""):
    """Returns the number of lines of a text file.

    Keyword Arguments:
        filename {str} -- file name (default: {""})

    Returns:
        Int -- number of lines of a text file.
    """
    # Iterate lazily instead of materialising all lines with readlines().
    with open(filename, mode="r", encoding="utf-8") as handle:
        return sum(1 for _ in handle)
def build_data(_data, kwds):
    """
    Returns property data dict, regardless of how it was entered.

    :param _data: Optional property data dict.
    :type _data: dict
    :param kwds: Optional property data keyword pairs.
    :type kwds: dict
    :rtype: dict
    """
    # A fresh dict is created here rather than defaulting the _data
    # argument to a mutable value.
    if _data is None:
        merged = {}
    else:
        merged = _data
    merged.update(kwds)
    return merged
def GetXMLTreeRoot(tree):
    """Get the root node of an xml tree."""
    return tree.getroot()
def all_files_fixture(
    bad_link, link_dir, link_txt_file, tmp_path, txt_file, jpeg_file, zip_file
):
    """Return a dict of different fixture file cases."""
    cases = dict(
        bad_link=bad_link,
        link_dir=link_dir,
        link_txt_file=link_txt_file,
        tmp_path=tmp_path,
        txt_file=txt_file,
        txt_file_parent=txt_file.parent,
        jpeg_file=jpeg_file,
        zip_file=zip_file,
        zip_file_parent=zip_file.parent,
    )
    return cases
def append_unless(unless, base, appendable):
    """
    Conditionally append one object to another. Currently the intended usage is for strings.

    :param unless: a value of base for which should not append (and return as is)
    :param base: the base value to which append
    :param appendable: the value to append to base
    :return: base, if base == unless; base + appendable, otherwise.
    """
    if base == unless:
        return base
    return base + appendable
import typing
def override_parameter_in_conf(configuration: typing.Dict, override_parameter: typing.Optional[typing.Dict]):
    """
    Given a configuration dict (mapping from hyperparameter name to value), it will override the values using an
    override dict (mapping from hyperparameter name to new value).

    :param configuration: the configuration dict; updated in place.
    :param override_parameter: mapping of names to new values, or None for a no-op.
    :return: the (possibly updated) configuration dict.
    :raises ValueError: if a key in ``override_parameter`` is not present in ``configuration``.
    """
    if override_parameter is None:
        return configuration
    for key, new_value in override_parameter.items():
        if key not in configuration:
            # Name the offending key instead of raising a bare ValueError.
            raise ValueError("Unknown hyperparameter to override: {!r}".format(key))
        configuration[key] = new_value
    return configuration
import inspect
import logging
def get_module_logger(sublogger:str=""):
    """ Return a logger class for the calling module.
    Add a 'sublogger' string to differentiate multiple loggers in a module.
    """
    # NOTE(review): stack()[2] skips this frame AND one intermediate frame, so
    # this assumes get_module_logger is reached through one level of
    # indirection -- confirm against the callers; a direct call would resolve
    # the module of the caller's caller instead.
    caller = inspect.getmodulename(inspect.stack()[2][1])
    if caller is None:
        # Fall back to this module's name (e.g. when called from an
        # interactive session where no module name can be derived).
        caller = __name__
    # With the default sublogger this produces a trailing dot
    # (e.g. "mymodule."), which logging treats as a distinct child logger.
    return logging.getLogger(f"{caller}.{sublogger}") | be92b0d1f7ce6574f8d33dce03ac937b007b3fbd | 684,691 |
from typing import OrderedDict
def dict_partition(d, keyfunc, dict=OrderedDict):
    """
    Partition a dictionary.

    Args:
        d (dict): the dictionary to operate on.
        keyfunc (function): the function to partition with. It must accept the
            dictionary key and value as arguments, and should return a boolean.
        dict (type): the type of dictionaries to return.

    Return:
        (dict, dict): all of the elements for which the key function returned
        True, and all of the elements for which it returned False.
    """
    matched, unmatched = dict(), dict()
    for key, value in d.items():
        target = matched if keyfunc(key, value) else unmatched
        target[key] = value
    return matched, unmatched
def prompt_propositions(proposals, message_add="", integer=False):
    """ Asks the user to choose from among the proposals.
    The propositions must be in the form of a dictionary keys, options.
    The user is asked to enter the key of the desired proposal.
    If the answer is not in the dictionary keys of proposals, the request is repeated.

    :param proposals: proposals dictionary
    :param message_add: additional message to display
    :param integer: True if the input must be converted in integer
    :return: input
    """
    proposal_message = "".join(
        f"soit: {cle} pour {item}.\n" for cle, item in proposals.items()
    )
    message = message_add + "\n Choisissez parmi: \n" + proposal_message
    error_message = ("Votre réponse ne correspond pas. \n"
                     "Veuillez indiquer : \n") + proposal_message

    def ask(prompt):
        # Read one answer, converting to int when requested.
        answer = input(prompt)
        return int(answer) if integer else answer

    response = ask(message)
    while response not in proposals:
        response = ask(error_message)
    return response
import logging
def label_correcting(graph, start, target, Queue=None):
    """
    Find the shortest path in graph from start to target.

    Parameters
    ----------
    graph : object
    start : object
        Node in the graph.
    target : object
        Node in the graph.
    Queue : class
        Datastructure which supports "append", "pop" and "in".

    Returns
    -------
    list or None
        List of nodes, starting with start and ending with target or None if
        no path from start to target exists.
    """
    # Default queue is a plain list; list.pop() removes from the END, so the
    # traversal order is LIFO (depth-first-like) unless another Queue is given.
    if Queue is None:
        Queue = list
    # Initialize distances
    # NOTE(review): node.dist / node.parent are stored directly on the graph's
    # node objects; this mutates the caller's graph.
    for node in graph.nodes:
        node.dist = float("inf")
    start.dist = 0
    # u is the best known distance to the target (an upper bound used to
    # prune candidate paths).
    u = float("inf")
    q = Queue()
    q.append(start)
    # Traverse the graph
    while len(q) > 0:
        x = q.pop()
        logging.info("Traverse '%s'...", x)
        for y in graph.children(x):
            # Only relax edges that both improve y's distance and stay below
            # the current upper bound u (label-correcting pruning).
            if x.dist + graph.dist(x, y) < min(y.dist, u):
                y.dist = x.dist + graph.dist(x, y)
                y.parent = x
                if y != target and y not in q:
                    q.append(y)
                if y == target:
                    u = x.dist + graph.dist(x, y)
    # Reconstruct the shortest path
    # NOTE(review): this assumes target already has a 'parent' attribute
    # (e.g. initialized to None elsewhere); if the target was never reached
    # and has no such attribute, this raises AttributeError -- confirm the
    # node class defines it.
    shortest_path = None
    if target.parent is not None:
        shortest_path = []
        current_node = target
        # Walk parent pointers back to the start, then reverse.
        while current_node != start:
            shortest_path.append(current_node.identifier)
            current_node = current_node.parent
        shortest_path.append(start.identifier)
        shortest_path = shortest_path[::-1]
    return shortest_path | 625f785f00755b7d530849bcd8667e52a4a9807b | 684,698 |
def parse_counts_line(line):
    """
    Parses the counts line of a molecule and returns it as a dictionary.

    aaabbblllfffcccsssxxxrrrpppiiimmmvvvvvv
    aaa = number of atoms (current max 255)*
    bbb = number of bonds (current max 255)*
    lll = number of atom lists (max 30)*
    fff = (obsolete)
    ccc = chiral flag: 0=not chiral, 1=chiral
    sss = number of stext entries
    xxx = (obsolete)
    rrr = (obsolete)
    ppp = (obsolete)
    iii = (obsolete)
    mmm = number of lines of additional properties,
    vvvvv = version for the format
    """
    def field(start, end):
        # Fields are fixed-width; going through float() tolerates values
        # such as '  3.0'.
        return int(float(line[start:end]))

    parsed = {
        "aaa": field(0, 3),
        "bbb": field(3, 6),
        "lll": field(6, 9),
        "ccc": field(12, 15),
        "sss": field(15, 18),
        "mmm": field(18, 21),
    }
    parsed["vvvvv"] = line[-5:]
    return parsed
def trans_matrix(M):
    """Take the transpose of a matrix.

    Works for any rectangular matrix (a list of equal-length rows), not just
    square ones: an m x n input yields an n x m result. The previous
    implementation used ``len(M)`` for both dimensions and was only correct
    for square matrices.
    """
    if not M:
        return []
    rows = len(M)
    cols = len(M[0])
    return [[M[i][j] for i in range(rows)] for j in range(cols)]
import re
def decontract(phrase):
    """
    Expand common English contractions in the text, e.g.
    n't -> not, 're -> are, 'll -> will;
    haven't -> have not, must've -> must have.

    :type phrase : string
    :returns phrase : decontracted phrase
    """
    # Order matters: specific forms (won't, can't) before the generic n't.
    replacements = (
        (r"won't", "will not"),
        (r"can\'t", "can not"),
        (r"n\'t", " not"),
        (r"\'re", " are"),
        (r"\'s", " is"),
        (r"\'d", " would"),
        (r"\'ll", " will"),
        (r"\'t", " not"),
        (r"\'ve", " have"),
        (r"\'m", " am"),
    )
    for pattern, expansion in replacements:
        phrase = re.sub(pattern, expansion, phrase)
    return phrase
def typename(type):
    """
    Get the name of `type`.

    Parameters
    ----------
    type : Union[Type, Tuple[Type]]

    Returns
    -------
    str
        The name of `type` or a tuple of the names of the types in `type`.

    Examples
    --------
    >>> typename(int)
    'int'
    >>> typename((int, float))
    '(int, float)'
    """
    if hasattr(type, "__name__"):
        return type.__name__
    # A tuple of types: a single element unwraps, several are joined.
    if len(type) == 1:
        return typename(*type)
    return "(%s)" % ", ".join(map(typename, type))
def atom_count(data, **params):
    """
    Calculate number of occurrences of a given atomic element in the data.

    Args:
        data (list): values.
        params (kwargs):
            atom: element for which occurrences are counted.

    Returns the number of occurrences of the atom in the data.
    """
    target = params['atom']
    return sum(1 for elem in data if elem == target)
def _rindex(seq, element):
"""Like list.index, but gives the index of the *last* occurrence."""
seq = list(reversed(seq))
reversed_index = seq.index(element)
return len(seq) - 1 - reversed_index | f9834a0860c5c2fa107a1af39be91a2c918cbf51 | 684,717 |
import re
import keyword
def _make_valid_attribute_name(s):
"""Return a string that is a valid Python attribute name.
Leading digits are prefixed with underscores, non-alpha numeric characters
are replaced with underscores, and keywords are appended with an underscore.
This function ensures the string can be used as a valid object attribute
name.
"""
if not s.isidentifier():
s = re.sub(r"[^0-9a-zA-Z_]", r"_", s)
s = re.sub(r"^([0-9]+)", r"_\1", s)
if keyword.iskeyword(s):
s = s + "_"
return s | 877f756d37ca8f22e10febe907e2c9def35dcc8a | 684,719 |
def parse_resolution(resolution_string):
    """
    Parse a resolution string and raise ValueError in case of wrong format.

    @param resolution_string string representing a resolution, like "128x128"
    @return resolution as a tuple of integers
    @raise ValueError if the string is not of the form '<width>x<height>'
    """
    tokens = resolution_string.split('x')
    if len(tokens) != 2:
        # Include the offending value so callers can see what was wrong
        # (previously a bare ValueError was raised with no message).
        raise ValueError(
            "Invalid resolution {!r}; expected '<width>x<height>'".format(
                resolution_string))
    return tuple(int(t) for t in tokens)
def get_previous_season(season):
    """
    Convert string e.g. '1819' into one for previous season, i.e. '1718'.

    Years are kept zero-padded to two digits, so '1011' -> '0910'
    (the unpadded format previously produced '910').
    """
    start_year = int(season[:2])
    end_year = int(season[2:])
    # Zero-pad each half so seasons before 2010 keep the 4-character shape.
    return "{:02d}{:02d}".format(start_year - 1, end_year - 1)
def test_equalto(value, other):
"""Test to see if two values are the same."""
return value == other | e447d9161deeba39f660bc595ff0ea600d7dc129 | 684,727 |
import bisect
def _find_closest(sorted_values,
value,
before=False):
"""
Convenience function for finding the list index of the first (leftmost) value greater than x in list sorted_values.
:param sorted_values:
:param value:
:param before: if True then return the first (leftmost) value less than x in the sorted_values list, otherwise
return the last (rightmost) value greater than x in the sorted_values list
:return: index of the first (leftmost) value greater than x in the sorted_values list
:rtype: int
"""
if before:
index = bisect.bisect_left(sorted_values, value)
else:
index = bisect.bisect_right(sorted_values, value)
if index != len(sorted_values):
return index
raise ValueError | 7964e9353684af3359d1081a527a3ab238876ddd | 684,731 |
def get_indentation(line):
    """Return the indentation (number of spaces and tabs at the beginning) of a given line."""
    count = 0
    for ch in line:
        if ch != ' ' and ch != '\t':
            break
        count += 1
    return count
def merge_diffs(d1, d2):
    """
    Merge diffs `d1` and `d2`, returning a new diff which is
    equivalent to applying both diffs in sequence. Do not modify `d1`
    or `d2`.
    """
    # When either side is not a dict, the later diff simply wins.
    if not (isinstance(d1, dict) and isinstance(d2, dict)):
        return d2
    merged = d1.copy()
    for key, val in d2.items():
        merged[key] = merge_diffs(merged[key], val) if key in merged else val
    return merged
def subsample_image(input_image, zoom_box_coords):
    """
    Crops the input image to the coordinates described in the
    'zoom_box_coords' argument.

    Args:
        input_image (numpy.array): The input image.
        zoom_box_coords (tuple, list): Coordinates ordered as
            (x, y, width in pixels, height in pixels), where 'x' and 'y'
            describe the top-left of the box.

    Returns:
        (numpy.array) The cropped image.

    Notes:
        Code adapted from:
        https://stackoverflow.com/questions/39382412/crop-center-portion-of-a-numpy-image
    """
    x, y, width, height = zoom_box_coords[:4]
    return input_image[y:y + height, x:x + width]
import requests
def get_auth(url: str, login: str, password: str) -> tuple:
    """
    Get an authentication token and user ID from Rocket.Chat.

    :param url: Rocket.Chat API login URL
    :type: str
    :param login: Rocket.Chat user login
    :type: str
    :param password: Rocket.Chat user password
    :type: str
    :return: tuple with userID and authToken
    :rtype: tuple
    """
    try:
        resp = requests.post(
            url,
            headers={'Content-Type': 'application/json'},
            json={'username': login, 'password': password},
            timeout=(1, 3),  # (connect, read) timeouts in seconds
        )
        body = resp.json()
        if body['status'] == 'success':
            return body['data']['userId'], body['data']['authToken']
        return body['status'], body['error']
    except requests.exceptions.SSLError:
        raise SystemExit('ERROR: Cannot verify SSL Certificate.')
    except requests.exceptions.ConnectTimeout:
        raise SystemExit('ERROR: Cannot connect to Rocket.Chat API - connection timeout')
    except requests.exceptions.ConnectionError as e:
        raise SystemExit("ERROR: Cannot connect to Rocket.Chat API {}.".format(e))
def split(list_to_split, amount_of_parts):
    """Split a list into equal parts.

    Args:
        list_to_split: Any list
        amount_of_parts: Number of equally sized parts

    Returns:
        splitted list as a generator of lists
    """
    size, remainder = divmod(len(list_to_split), amount_of_parts)
    # The first `remainder` parts get one extra element each.
    return (
        list_to_split[part * size + min(part, remainder):
                      (part + 1) * size + min(part + 1, remainder)]
        for part in range(amount_of_parts)
    )
def input_profile(t, x0):
    """
    Calculate the user input x as a function of time.

    x0 is the initial value:
    ramp down from 100% to 50% load at 5%/min rate,
    hold for half an hour,
    ramp up from 50% to 100% load at 5%/min rate,
    hold for 20 minutes.
    """
    if t < 60:
        return x0
    if t < 660:
        # Ramp down: 100% -> 50% over 600 s.
        return x0 * (1 - (t - 60) / 600 * 0.5)
    if t < 2460:
        return x0 * 0.5
    if t < 3060:
        # Ramp up: 50% -> 100% over 600 s.
        return x0 * (0.5 + (t - 2460) / 600 * 0.5)
    # Hold for 1200 sec to 4260 sec
    return x0
def corr_shift(x, y, x_shift=0, y_shift=0):
    """Compute the correlation of two series after shifting each.

    Args:
        x (series): first series
        y (series): second series
        x_shift (int, optional): shift of first series. Defaults to 0.
        y_shift (int, optional): shift of second series. Defaults to 0.

    Returns:
        float: correlation
    """
    shifted_x = x.shift(x_shift)
    shifted_y = y.shift(y_shift)
    return shifted_x.corr(shifted_y)
def GetOperationError(error):
    """Returns a human readable string representation from the operation.

    Args:
        error: A string representing the raw json of the operation error.

    Returns:
        A human readable string representation of the error.
    """
    return f'OperationError: code={error.code}, message={error.message}'
def map_age(age):
    """Map ages to standard buckets.

    Numeric ages map to decade ranges ('20-29'), ages >= 90 to '90+',
    strings containing 'month' to '0-9', and anything else is passed
    through unchanged.
    """
    try:
        age = int(age)
    except (ValueError, TypeError):
        # Narrowed from a bare except: only non-numeric conversion failures
        # are handled here; other exceptions now propagate.
        if 'month' in age.lower():
            return '0-9'
        return age
    if age >= 90:
        return '90+'
    lower = (age // 10) * 10
    # Ages on a decade boundary (e.g. 20) span 20-29; others round up.
    upper = age + 9 if age % 10 == 0 else ((age + 9) // 10) * 10 - 1
    return f'{lower}-{upper}'
def remove_url_trailing_slash(url):
    """
    Returns the input url without any trailing / if it had a trailing slash. This is useful for repository url
    where https://github.com/lwhjon/repo-labels-cli/ and https://github.com/lwhjon/repo-labels-cli both are equivalent
    hence for consistency we remove the trailing / for repository url

    :param url: The url to be formatted
    :return: Returns the url without any trailing /
    """
    # str.endswith also handles the empty string safely
    # (indexing url[-1] raised IndexError for '').
    return url[:-1] if url.endswith('/') else url
def set_intersection(*sets):
    """Return the intersection of all the given sets.

    As of Python 2.6 you can write ``set.intersection(*sets)``.

    Returns a new set; the input sets are not modified. Calling with no
    arguments returns an empty set.
    """
    if not sets:
        return set()
    # Copy first: the previous version aliased sets[0] and the in-place &=
    # mutated the caller's first set.
    rv = set(sets[0])
    for s in sets[1:]:
        rv &= s
    return rv
def get_mf6_blockdata(f, blockstr):
    """Return list with all non comments between start and end of block
    specified by blockstr.

    Parameters
    ----------
    f : file object
        open file object
    blockstr : str
        name of block to search

    Returns
    -------
    data : list
        list of data in specified block
    """
    target = blockstr.lower()
    data = []
    # Advance past the "begin <blockstr>" marker, skipping comments.
    for line in f:
        if line[0] == "#":
            continue
        tokens = line.split()
        if tokens[0].lower() == "begin" and tokens[1].lower() == target:
            break
    # Collect non-comment lines until the matching "end <blockstr>" marker.
    for line in f:
        if line[0] == "#":
            continue
        tokens = line.split()
        if tokens[0].lower() == "end" and tokens[1].lower() == target:
            break
        data.append(line.rstrip())
    return data
def filter_genre(genre):
    """
    Create a filter function for a row of a pandas df.

    :param genre: string genre to filter on
    :return: Function that returns True if genre is in the row genre string
    """
    def wrap(row):
        genres = row['genre']
        if not isinstance(genres, str):
            # Missing/NaN genre fields never match.
            return False
        return genre in genres.split(', ')
    return wrap
def GenerateContext(kind, project_id, location, cluster_id):
    """Generates a kubeconfig context for an Anthos Multi-Cloud cluster.

    Args:
        kind: str, kind of the cluster e.g. aws, azure.
        project_id: str, project ID associated with the cluster.
        location: str, Google location of the cluster.
        cluster_id: str, ID of the cluster.

    Returns:
        The context for the kubeconfig entry.
    """
    return 'gke_{}_{}_{}_{}'.format(kind, project_id, location, cluster_id)
def fix_ncesid(ncesid, mode):
    """
    Applies standard formatting (zero padding and typecasting) to
    both schools' and districts' NCES IDs.

    Args:
        ncesid (int): Target NCES ID to fix (e.g. 100005).
        mode (str): Should be either "school" or "district".

    Returns:
        str: Standardized NCES ID (does not perform zero padding if
            unknown mode is provided).
    """
    widths = {"school": 12, "district": 7}
    return str(ncesid).zfill(widths.get(mode, 0))
def predict(clf, test_data, probabilities=True):
    """
    Returns an array of predictions for the given *test_data* using the classifier *clf*.

    If *probabilities* is True and the classifier supports it, the predictions
    will be preictal probabilities; otherwise the class labels are used.

    :param clf: The classifier to use.
    :param test_data: The data to predict labels for.
    :param probabilities: If True and the classifier supports it, the array
        will contain class probabilities. Otherwise it will contain 0-1 class
        guesses.
    :return: An ndarray with the class predictions for the test data.
    """
    use_proba = probabilities and hasattr(clf, 'predict_proba')
    if not use_proba:
        return clf.predict(test_data)
    scores = clf.predict_proba(test_data)
    # predict_proba yields one column per class; keep the column whose
    # class label is 1 (grid-search wrappers expose classes on the
    # best_estimator_ attribute).
    if hasattr(clf, 'best_estimator_'):
        classes = clf.best_estimator_.classes_
    else:
        classes = clf.classes_
    class_index = list(classes).index(1)
    return scores[:, class_index]
import random
import string
def _generate_postcode() -> str:
"""
Generates a postcode string. This is not guaranteed to be valid currently, but will
have the properties of
- One letter
- 1 or 2 numbers
- A space
- A number and 2 letters.
:returns: Postcode string
"""
first_letter = random.choice(string.ascii_uppercase)
numbers = random.randint(1, 30)
last_number = random.randint(1, 9)
last_letters = ''.join(random.choice(string.ascii_uppercase) for _ in range(2))
return first_letter + str(numbers) + ' ' + str(last_number) + last_letters | e2d34f14bd6b12b944daf45d7961aa37b017463d | 684,770 |
import json
def _response(**resp):
"""
Return an API Gateway compatible response.
"""
return {'body': json.dumps(resp)} | 66c8f9f2cdf9043bd4b9f1224aae4070978ee6b8 | 684,772 |
def build_independent_priors(priors):
    """Build a unit-cube prior transform for Bayesian fitting.

    Each entry of *priors* should have a (scipy-like) ``ppf`` method.
    """
    def prior_transform(u):
        transformed = u.copy()
        for index in range(len(u)):
            transformed[index] = priors[index].ppf(u[index])
        return transformed
    return prior_transform
import re
def is_valid_recurrence(text):
    """Check that text is a valid recurrence string.

    A valid recurrence string is 'DAILY', 'ONCE', 'WEEKDAYS', 'WEEKENDS' or
    of the form 'ON_DDDDDD' where D is a number from 0-7 representing a day
    of the week (Sunday is 0), e.g. 'ON_034' meaning Sunday, Wednesday and
    Thursday.

    Arg:
        text(str): the recurrence string to check

    Returns:
        bool: True if the recurrence string is valid, else False

    Examples:
        ::

            >>> from soco.alarms import is_valid_recurrence
            >>> is_valid_recurrence('WEEKENDS')
            True
            >>> is_valid_recurrence('')
            False
            >>> is_valid_recurrence('ON_132')  # Mon, Tue, Wed
            True
    """
    named = ("DAILY", "ONCE", "WEEKDAYS", "WEEKENDS")
    if text in named:
        return True
    return bool(re.match(r'ON_[0-7]{1,7}$', text))
import re
def split_into_attributes(s):
    """Split each purchase into a list of its tab-separated attributes."""
    return s.split("\t")
from typing import Tuple
import re
def parse_func_path(path: str) -> Tuple[str, str]:
    """
    Parses a function path 'file_or_module::func'.

    Parameters
    ----------
    path : str
        path should have the format: 'file_or_module::func', where
        `file_or_module` is a filename or python module and `func` is a
        function name.

    Returns
    -------
    Tuple[str, str]
        A tuple of file_or_module and function name.
    """
    m = re.match(r"(?P<file>.+)::(?P<func>.+)", path)
    if m is None:
        raise ValueError("Invalid function specification: '{}'".format(path))
    return m["file"], m["func"]
def make_patterns(dirs):
    """Returns a list of git match patterns for the given directories."""
    return ['{}/**'.format(d) for d in dirs]
def normalize_basename(s, force_lowercase=True, maxlen=255):
    """Replaces some characters from s with a translation table:

        trans_table = {" ": "_",
                       "/": "_slash_",
                       "\\": "_backslash_",
                       "?": "_question_",
                       "%": "_percent_",
                       "*": "_asterisk_",
                       ":": "_colon_",
                       "|": "_bar_",
                       '"': "_quote_",
                       "<": "_lt_",
                       ">": "_gt_",
                       "&": "_amp_"}

    then if the generated name is longer than maxlen, the name is truncated
    and the hash of the name modulo 0xffffffff is appended so that the
    result is exactly maxlen characters long.
    """
    l = s.lower() if force_lowercase else s
    trans_table = {" ": "_",
                   "/": "_slash_",
                   "\\": "_backslash_",
                   "?": "_question_",
                   "%": "_percent_",
                   "*": "_asterisk_",
                   ":": "_colon_",
                   "|": "_bar_",
                   '"': "_quote_",
                   "<": "_lt_",
                   ">": "_gt_",
                   "&": "_amp_"}
    n = "".join(trans_table.get(x, x) for x in l)
    if len(n) > maxlen:
        h = format(hash(n) & 0xffffffff, "08x")
        # Reserve 9 characters (the '_' separator + 8 hex digits) so the
        # result is exactly maxlen long. The previous version truncated to
        # maxlen-8 *before* appending, producing maxlen+1 characters.
        n = n[:maxlen - 9] + "_" + h
    return n
import struct
def opt_int64(buf, byte_order):
    """
    Convert an 8-byte buffer to a signed 64-bit integer.
    """
    (value,) = struct.unpack(byte_order + "q", buf)
    return value
def get_line_offsets(block):
    """ Compute the list of offsets in DataBlock 'block' which correspond to
    the beginnings of new lines.

    Returns: (offset list, count of lines in "current block")
    """
    # Note: this implementation based on string.find() benchmarks about twice
    # as fast as a list comprehension using re.finditer().
    text = block.data
    offsets = [0]
    lines_in_block = 0  # lines inside the range [block.start, block.end) only
    while True:
        newline_pos = text.find("\n", offsets[-1])
        if newline_pos < 0:
            # Tack on a final "line start" at EOF (if not already present)
            # so consecutive offsets always delimit one line.
            if offsets[-1] < len(text):
                offsets.append(len(text))
            return (offsets, lines_in_block)
        offsets.append(newline_pos + 1)
        # Keep track of the count of lines within the "current block".
        if block.start <= newline_pos < block.end:
            lines_in_block += 1
def numbertoampm(hour: int, minute: int) -> tuple:
    """
    Convert time in hh:mm format to AM/PM.
    """
    period = 'AM' if hour < 12 or hour == 24 else 'PM'
    hour %= 12
    if hour == 0:
        # Midnight/noon are written as 12, not 0.
        hour = 12
    return (hour, minute, period)
def reported_news(file_paths):
    """Check if Misc/NEWS has been changed.

    The ``in`` test already yields a bool, so the redundant
    ``True if ... else False`` ternary was removed.
    """
    return 'Misc/NEWS' in file_paths
def heuristic_distances(judgments, repeats):
    """
    Returns a numeric value for each distance (i, j) in judgments:
    d = (a + 1) / (a + b + 2), where a is the number of times a distance is
    judged greater than another.

    :param repeats: number of times each pairwise comparison is repeated
    :param judgments: (dict) key: pairs of distances (i,j) (m,n), value: counts
    :return: dict of distances (pair (i, j) -> distance value)
    """
    # For readability, call 'a' WIN and 'b' LOSS.
    win = {}
    loss = {}
    for comparison, count in judgments.items():
        first, last = comparison[0], comparison[-1]
        win[first] = win.get(first, 0) + count
        loss[first] = loss.get(first, 0) + (repeats - count)
        loss[last] = loss.get(last, 0) + count
        win[last] = win.get(last, 0) + (repeats - count)
    # Map win/loss tallies to a smoothed distance estimate per pair.
    return {pair: (win[pair] + 1) / float(win[pair] + loss[pair] + 2)
            for pair in win}
import requests
import json
def getUrl(articleName, fqdn='https://en.wikipedia.org/', apiPath='w/api.php', exceptNull=False):
    """Uses the WikiMedia API to determine the ID of a page with the given
    title, which is then used to construct a stable URL for the corresponding
    page.
    """
    query = '?action=query&prop=info&format=json&titles=' + articleName.replace(' ', '%20')
    response = requests.get(fqdn + apiPath + query)
    payload = json.loads(response.content)
    # A negative page ID means the article does not exist.
    page_ids = list(payload['query']['pages'].keys())
    page_id = int(page_ids[0])
    if page_id < 0 and exceptNull:
        raise Exception('Null page returned for article name "%s"' % articleName)
    return '%s?curid=%u' % (fqdn, page_id)
import struct
def set_int(bytearray_: bytearray, byte_index: int, _int: int):
    """Write an int into a bytearray as a big-endian 16-bit signed value.

    Notes:
        A PLC `int` datatype occupies two bytes.

    Args:
        bytearray_: buffer to write on.
        byte_index: byte index to start writing from.
        _int: int value to write.

    Returns:
        Buffer with the written value.

    Examples:
        >>> data = bytearray(2)
        >>> snap7.util.set_int(data, 0, 255)
        bytearray(b'\\x00\\xff')
    """
    value = int(_int)  # coerce, in case a float/str-like was passed
    # struct.pack('>h', ...) already yields the two bytes; no need to
    # unpack/re-pack them individually.
    bytearray_[byte_index:byte_index + 2] = struct.pack('>h', value)
    return bytearray_
def title(s: str) -> str:
    """Capitalize every word of a sentence.

    ``"foo bar" -> "Foo Bar"``
    ``"foo-bar" -> "Foo Bar"``
    ``"foo_bar" -> "Foo Bar"``
    """
    # Hyphens and underscores act as word separators too.
    words = s.replace('-', ' ').replace('_', ' ').split()
    return ' '.join(word.capitalize() for word in words)
def train_calc_split(pd_shots, match_id, features, label='is_goal'):
    """
    Split shot data into a training set and a held-out match.

    INPUT
        pd_shots: (pandas) shots data (all type / on Target)
        match_id: statsbomb match_id of the match to hold out
        features: list of feature column names
        label: label column name
    OUTPUT
        train_x, calc_x, train_y, calc_y: feature/label frames for the
        training rows and for the held-out match, respectively
    """
    holdout_mask = pd_shots['match_id'] == match_id
    pd_train = pd_shots[~holdout_mask]
    pd_calc = pd_shots[holdout_mask]
    return pd_train[features], pd_calc[features], pd_train[label], pd_calc[label]
def get_max_split(splits, keyfunc):
    """Return the split with the largest absolute key value.

    Args:
        splits (List[dict]): return value of group_transactions()
        keyfunc (func): extracts the numeric value from a split

    Returns:
        (Tuple[int, dict]): (index, split) of the entry whose
        ``abs(keyfunc(split))`` is largest (first one wins on ties).
    """
    def abs_key(indexed_split):
        return abs(keyfunc(indexed_split[1]))

    return max(enumerate(splits), key=abs_key)
def breadcrumbs_li(links):
    """Return HTML: a run of breadcrumb <li> items (no surrounding <ul> tags).

    ``links`` should be an iterable of tuples (URL, text); the last entry is
    rendered as the active crumb (text only, no link).

    :return: concatenated <li> markup, or '' for an empty sequence.
    """
    links = list(links)  # also accepts generators, and lets us index safely
    if not links:
        # The original indexed links[-1] unconditionally and crashed here.
        return ""
    li_str = '<li><a href="{}">{}</a></li>'
    li_str_last = '<li class="active"><span>{}</span></li>'
    # All but the last item become links; the last becomes the active crumb.
    crumbs = "".join(li_str.format(url, text) for url, text in links[:-1])
    return crumbs + li_str_last.format(links[-1][1])
def heuristicPortMatch(p1, p2):
    """Compare two ports.

    Returns 1 for an exact match (same db_moduleId), 0 for a partial match
    (same db_type, db_moduleName and sig), and -1 otherwise.
    """
    if p1.db_moduleId == p2.db_moduleId:
        return 1
    partial = (p1.db_type == p2.db_type
               and p1.db_moduleName == p2.db_moduleName
               and p1.sig == p2.sig)
    return 0 if partial else -1
def make_dict(list1, list2):
    """
    Build a dictionary from two parallel lists.

    Input:
        list1 (list): List to be used for keys.
        list2 (list): List to be used for values; when shorter than list1,
                      the remaining keys map to None.
    Output:
        out_dict (dict): Dictionary pairing list1[i] -> list2[i].
    """
    return {key: (list2[idx] if idx < len(list2) else None)
            for idx, key in enumerate(list1)}
def partition_fxn(output_string):
    """ Reads the partition function from the MESSPF output.

    :param str output_string: contents of a MESSPF output file
    :return temps: list of temperatures (column 1, kept as strings)
    :return logq: log(Q) where Q is the partition function (column 2)
    :return dq_dt: dQ/dT, first derivative of Q w.r.t. temperature (column 3)
    :return dq2_dt2: d^2Q/dT^2, second derivative (column 4)
    """
    temps, logq, dq_dt, dq2_dt2 = [], [], [], []
    # The first two lines are header; each remaining line holds the four
    # whitespace-separated columns T, log(Q), dQ/dT, d^2Q/dT^2.
    for line in output_string.splitlines()[2:]:
        columns = line.strip().split()
        temps.append(columns[0])
        logq.append(columns[1])
        dq_dt.append(columns[2])
        dq2_dt2.append(columns[3])
    return temps, logq, dq_dt, dq2_dt2
def mixin_enabled(plugin, key, *args, **kwargs):
    """Return whether the mixin identified by *key* exists and is configured
    on *plugin* (delegates to the plugin's own ``mixin_enabled`` check).

    NOTE(review): *args/**kwargs are accepted but silently ignored -- they
    are not forwarded to ``plugin.mixin_enabled``. Confirm whether they
    should be passed through.
    """
    return plugin.mixin_enabled(key)
def check_lammps_sim(out_file, verbose=True):
    """
    Check whether a LAMMPS simulation has finished.

    A finished run has 'Total wall time' on the last line of its log file.
    Any failure to read the file (missing, empty, ...) counts as unfinished;
    the error is printed when ``verbose`` is True.
    """
    finished = False
    try:
        with open(out_file, 'r') as handle:
            last_line = handle.readlines()[-1]
        finished = 'Total wall time' in last_line
    except Exception as err:
        if verbose:
            print(err)
    return finished
import re
def _date_tuple(strdate):
"""_date_tuple(strdate)
Converts a date string of the format "[YY]YY/[M]M/[D]D" into a 3-tuple
of month, day, and year integers.
Positional arguments:
strdate (str) - date string of the format "[YY]YY/[M]M/[D]D"
Returns:
tuple ((int, int, int)) - tuple of (year, month, day) integers
The general input date format should consist of the following, in order:
1. 1-2 digits
2. a delimeter from the set "/", "\\", "-", "_", ".", ",", or whitespace
3. 1-2 digits
4. another delimeter
5. 2 or 4 digits
"""
# Split string and verify length
s = re.split("[/\\-_., \t]+", strdate)
if len(s) != 3:
raise ValueError("input date must include 3 delimited numbers")
# Read the input numbers
y = int(s[0])
m = int(s[1])
d = int(s[2])
# Add 2000 to a 2-digit year
if y < 100:
y += 2000
return (y, m, d) | 48da1e5d2cf26480a931360b0872340d44da0eca | 684,853 |
def find_motif_positions(sequence: str, motif: str):
    """
    Returns the start and end position(s) of a core motif in a sequence.
    Overlapping occurrences are all reported.

    Args:
        sequence: string of nucleotides
        motif: string of nucleotides representing a motif

    Returns:
        startpositions: list of start position(s) of core motif in read
        endpositions: list of end position(s) (inclusive) of core motif in read
    """
    motif_len = len(motif)
    # Slice comparison replaces the original character-by-character inner
    # loop; behavior (including overlapping matches) is unchanged.
    startpositions = [i for i in range(len(sequence) - motif_len + 1)
                      if sequence[i:i + motif_len] == motif]
    endpositions = [start + motif_len - 1 for start in startpositions]
    return startpositions, endpositions
def zeta_a(eN, cL, w):
    """
    EnKF-N inflation estimation via w.
    Returns zeta_a = (N-1)/pre-inflation^2.

    Using this inside an iterative minimization as in the iEnKS
    effectively blends the distinction between the primal and dual EnKF-N.
    """
    ens_size = len(w)
    return (ens_size - 1) * cL / (eN + w @ w)
def get_ptf_server_intf_index(tor, tbinfo, iface):
    """Get the index of the ptf ToR-facing interface *iface* on the ptf host."""
    minigraph_facts = tor.get_extended_minigraph_facts(tbinfo)
    ptf_indices = minigraph_facts["minigraph_ptf_indices"]
    return ptf_indices[iface]
def get_unique_locations(db):
    """
    Gets an iterator over the unique locations (lat, lon) in the signal table.

    :param db: Source database (VedDb)
    :return: Iterator to the unique locations
    """
    query = "select distinct latitude, longitude from signal"
    return db.query_iterator(query)
def preplace(schema, reverse_lookup, t):
    """
    Replaces basic types and enums with placeholder default values.

    :param schema:
        the output of a simplified schema
    :param reverse_lookup:
        a support hash from typename to graphql kind, useful to navigate the
        schema in O(1)
    :param t:
        type name to generate the AST placeholder for; may be anything
        inside the graph since the caller recurses
    """
    primitives = {
        'String': '@code@',
        'Int': 1334,
        'Boolean': 'true',
        'Float': 0.1334,
        'ID': 14,
    }
    if t in primitives:
        return primitives[t]
    if reverse_lookup[t] == 'enum':
        # Use the first declared value of the enum.
        return next(iter(schema['enum'][t]))
    # Scalars are custom-implemented so there is no generic placeholder;
    # they -- like any remaining type -- are returned unchanged.
    return t
import requests
def snap(x, y, url, resource='/grid/snap'):
    """Determine the chip and tile coordinates for a point.

    Args:
        x (int): projection coordinate x
        y (int): projection coordinate y
        url (str): protocol://host:port/path
        resource (str): /grid/snap/resource (default: /grid/snap)

    Returns:
        dict

    Example:
        >>> chipmunk.snap(x=0, y=0, url='http://host:port/path')
        {'chip': {'grid-pt': [855.0, 1104.0], 'proj-pt': [-585.0, 2805.0]},
         'tile': {'grid-pt': [17.0, 22.0], 'proj-pt': [-15585.0, 14805.0]}}
    """
    endpoint = '{}{}'.format(url, resource)
    response = requests.get(url=endpoint, params={'x': x, 'y': y})
    return response.json()
import ast
def str_rep_to_list(s):
    """
    Safely parse a string representation of a list into a Python list object.

    :param s: string such as "[1, 2, 3]"
    :return: the evaluated literal
    """
    return ast.literal_eval(s)
def _split_list_by_function(l, func):
"""For each item in l, if func(l) is truthy, func(l) will be added to l1.
Otherwise, l will be added to l2.
"""
l1 = []
l2 = []
for item in l:
res = func(item)
if res:
l1.append(res)
else:
l2.append(item)
return l1, l2 | 3812a7b43cb103b746360943303441ae31122407 | 684,878 |
def print_time(time):
"""Format a datetime object to be human-readable"""
return time.astimezone(tz=None).strftime('%m/%d') | d75deefac5f2326637fe73757b8baee95d0ac42b | 684,879 |
def compute_eer(target_scores, nontarget_scores):
    """Calculate EER following the same way as in Kaldi.

    Args:
        target_scores (array-like): sequence of scores where the
                                    label is the target class
        nontarget_scores (array-like): sequence of scores where the
                                    label is the non-target class
    Returns:
        eer (float): equal error rate
        threshold (float): the value where the target error rate
                           (the proportion of target_scores below
                           threshold) is equal to the non-target
                           error rate (the proportion of nontarget_scores
                           above threshold)
    """
    assert len(target_scores) != 0 and len(nontarget_scores) != 0
    # Sort both score lists ascending; candidate thresholds are the sorted
    # target scores.
    tgt_scores = sorted(target_scores)
    nontgt_scores = sorted(nontarget_scores)
    target_size = float(len(tgt_scores))
    nontarget_size = len(nontgt_scores)
    target_position = 0
    # Walk the candidate thresholds: at index `target_position`, the target
    # error rate is target_position/target_size. Find the matching position
    # in the sorted non-target scores where the non-target error rate (the
    # fraction of non-target scores at or above threshold) equals it.
    for target_position, tgt_score in enumerate(tgt_scores[:-1]):
        nontarget_n = nontarget_size * target_position / target_size
        nontarget_position = int(nontarget_size - 1 - nontarget_n)
        if nontarget_position < 0:
            nontarget_position = 0
        # Stop at the first threshold where the two error rates cross.
        if nontgt_scores[nontarget_position] < tgt_score:
            break
    threshold = tgt_scores[target_position]
    eer = target_position / target_size
    return eer, threshold
from typing import Tuple
def get_module_and_func_names(command: str) -> Tuple[str, str]:
    """
    Split a dotted 'module.path.function' string into module and function names.
    Everything before the last dot is the module path; the rest is the function.

    Args:
        command (str): String of format module.function_name

    Raises:
        Exception: If the string contains no dot separator

    Returns:
        Tuple[str, str]: (module_name, function_name) extracted from the input
    """
    module, dot, func = command.rpartition('.')
    if not dot:
        raise Exception('The command should be a function to call')
    return module, func
def right_pad(xs, min_len, pad_element):
    """
    Appends `pad_element`s to `xs` so that it has length `min_len`.
    No-op if `len(xs) >= min_len`.  Always returns a new list.
    """
    shortfall = max(0, min_len - len(xs))
    return xs + [pad_element] * shortfall
def get_lids(f):
    """get identifiers that specify an absolute location (i.e. start with '/')

    Returns a dict mapping each namespace in f.ddef to the list of its
    structure ids that begin with '/'.
    """
    return {ns: [sid for sid in f.ddef[ns]['structures'] if sid[0] == '/']
            for ns in f.ddef}
def real2complex(rfield):
    """
    convert raw qg_model output to complex numpy array
    suppose input has shape
    psi(time_step (optional), real_and_imag, ky, kx, z(optional))
    """
    shape = rfield.shape
    # With a trailing z axis the real/imag axis sits 4 positions from the end.
    if shape[-2] + 1 == 2 * shape[-3]:
        return rfield[..., 0, :, :, :] + 1j * rfield[..., 1, :, :, :]
    # Without z it sits 3 positions from the end.
    if shape[-1] + 1 == 2 * shape[-2]:
        return rfield[..., 0, :, :] + 1j * rfield[..., 1, :, :]
    raise NotImplementedError('Unrecognized field type')
def point_from_pose(pose):
    """Extract the origin point from a pose.

    Parameters
    ----------
    pose : Pose
        sequence whose first element is the position

    Returns
    -------
    Point, np array of three floats
        the position component of the pose
    """
    return pose[0]
def strip_spaces(fields):
    """
    Strip spaces and newline characters from a list of strings.

    The list is modified in place (callers may rely on that) and also
    returned for convenience.

    Inputs
    ------
    fields : list of strings

    Returns
    -------
    list : the same list, with ' ' and '\\n' stripped from each element

    Examples
    --------
    strip_spaces(['hi ', 'zeven 1', 'yo\\n'])
    """
    # Slice assignment preserves the in-place mutation of the original
    # pop-front/append-back rotation, without the fragile index juggling.
    fields[:] = [element.strip(' \n') for element in fields]
    return fields
def readable_keyword(s):
    """Return keyword with only the first letter in title case."""
    # Empty strings, '*'-sections and '['-settings are passed through as-is.
    if not s or s.startswith(("*", "[")):
        return s
    if "." in s:
        # Keep the library prefix untouched; re-case only the keyword name.
        library, _, name = s.rpartition(".")
        return library + "." + name[0].title() + name[1:].lower()
    return s[0].title() + s[1:].lower()
def _default_bounds(signal):
"""Create a default list of bounds for a given signal description
If no bounds were specified, they default to [0, 0]:
['name', 0, 0, 0, 0]
If no bits of the port were picked, the whole port is picked:
['name', x, y, x, y]
A signal may use a slice of a port
This example uses the 6th and the 7th bits of the 16-bit port:
['example_2_of_16', 15, 0, 7, 6]
:param signal can be one of:
'port_name'
['port_name', upper_bound, lower_bound]
['port_name', upper_bound, lower_bound, upper_picked, lower_picked]
:return:
['port_name', upper_bound, lower_bound, upper_picked, lower_picked]
"""
# there's just the name
if isinstance(signal, str):
return (signal, 0, 0, 0, 0)
else:
# there's just the name in a list
if len(signal) == 1:
return signal + [0, 0, 0, 0]
# there's the name and bounds
if len(signal) == 3:
return signal + [signal[1], signal[2]]
return signal | 42fec5f8af4b110ed4821c3509ddb187c8e1c24b | 684,892 |
def accept_objects(func):
    """This decorator can be applied to functions whose first argument is a list
    of (x, y, z) points. It allows the function to also accept a list of objects
    which have x(), y() and z() methods instead.

    The wrapper first tries to unpack each element as an (x, y, z) triple; on
    TypeError it falls back to calling the accessor methods.
    NOTE(review): a generator input may be partially consumed by the failed
    first attempt -- pass a list/tuple of objects, as the contract implies.
    """
    from functools import wraps

    # wraps() copies __name__/__doc__ like the original did, plus __module__,
    # __qualname__, __dict__ and __wrapped__, which the manual copy missed.
    @wraps(func)
    def new_func(objects, *args, **kwargs):
        try:
            points = [(x, y, z) for x, y, z in objects]
        except TypeError:
            points = [(obj.x(), obj.y(), obj.z()) for obj in objects]
        return func(points, *args, **kwargs)

    return new_func
def _filter_rows(df, min_ct=0):
"""Filter out rows with counts less than the minimum."""
row_sums = df.T.sum()
filtered_df = df[row_sums >= min_ct]
return filtered_df | 7d6130836a87f3033613fa8a94a1b81f265a2cb9 | 684,897 |
def get_last_id(list_of_id, width):
    """ Gets the last identifier given a list of identifiers.

    :param list_of_id: list of zero-padded numeric identifier strings
                       (empty strings allowed)
    :param width: the width of the identifier.
    :return: the last (largest) identifier, zero-padded to ``width``.
    """
    last_number = 0
    for identifier in list_of_id:
        if identifier == "":
            # NOTE(review): an empty identifier resets the running maximum;
            # behavior preserved from the original -- confirm it is intended.
            last_number = 0
        else:
            # int() already ignores leading zeros; the old lstrip('0') made
            # all-zero ids like '000' crash with int('') -> ValueError.
            last_number = max(last_number, int(identifier))
    return str(last_number).zfill(width)
def get_list_by_separating_strings(list_to_be_processed, char_to_be_replaced=",", str_to_replace_with_if_empty=None):
    """ This function converts a list of type:
    ['str1, str2, str3', 'str4, str5, str6, str7', None, 'str8'] to:
    [['str1', 'str2', 'str3'], ['str4', 'str5', 'str6', 'str7'], [], ['str8']]

    Entries that are None/False/'' become empty lists, and a None/False/''
    ``list_to_be_processed`` yields [].

    ``str_to_replace_with_if_empty`` is retained for backward compatibility:
    the branch that used it in the original was unreachable (empty-string
    entries are already mapped to [] above) and has been removed.
    """
    final_list = []
    if list_to_be_processed is None or list_to_be_processed is False or list_to_be_processed == "":
        return final_list
    for entry in list_to_be_processed:
        if entry is None or entry is False or entry == "":
            final_list.append([])
        else:
            # Split on the separator and trim whitespace around each part.
            final_list.append([part.strip() for part in entry.split(char_to_be_replaced)])
    return final_list
import pathlib
def get_configuration_path() -> str:
    """Return the path to configuration/conf.ini relative to the project root
    (three directories above this file)."""
    project_root = pathlib.Path(__file__).parent.parent.parent
    return str(project_root) + '/configuration/conf.ini'
def try_int(s, *args):
    """Convert *s* to an int if possible; otherwise return the first extra
    argument (when given) or *s* unchanged."""
    #pylint: disable=invalid-name
    try:
        return int(s)
    except (TypeError, ValueError):
        if args:
            return args[0]
        return s
def datetime_adapter(obj, request):
    """Json adapter for datetime objects.

    Formats the object as 'DD/MM/YYYY HH:MM:SS', falling back to date-only
    'DD/MM/YYYY' when the first format fails.  ``request`` is unused but
    kept for adapter-signature compatibility.
    """
    try:
        return obj.strftime('%d/%m/%Y %H:%M:%S')
    # Narrowed from a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt.
    except Exception:
        return obj.strftime('%d/%m/%Y')
def validate_input_data(data):
    """
    Takes in user input data and checks that every key has a value.

    Raises KeyError if a value is missing -- as the original docstring
    promised; the previous implementation used 'assert', which is silently
    stripped when running under 'python -O'.

    Returns a validated data in a dict format.
    """
    cleaned_data = {}
    for key in data:
        if data[key] is None:
            raise KeyError(key + ' key is missing')
        cleaned_data[key] = data[key]
    return cleaned_data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.