content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def remove_underscore(text):
    """Replace every underscore in *text* with a space.

    Call this on variable names and api endpoints, so that
    BERT tokenizes names like 'find_place' as 'find place'.
    """
    return " ".join(text.split("_"))
def filter_contacts(cmap, threshold=0.2):
    """Remove low score contacts from contact prediction list.

    Sorts the map by ``raw_score`` in descending order and truncates it at
    the first contact whose score falls below ``threshold``.

    :param cmap: Contact prediction map.
    :type cmap: :class:`~conkit.core.contactmap.ContactMap`
    :param threshold: Threshold, defaults to 0.2.
    :type threshold: float, optional
    :return: The contacts whose ``raw_score`` is >= ``threshold``.
    """
    cmap.sort('raw_score', reverse=True, inplace=True)
    cnt = 0
    for contact in cmap:
        if contact.raw_score < threshold:
            break
        cnt += 1
    # BUGFIX: previously returned cmap[:cnt - 1], which dropped the last
    # qualifying contact — and, when cnt == 0, returned everything except
    # the final contact instead of an empty map.
    return cmap[:cnt]
def split_seconds(seconds: int) -> dict:
    """Split a number of seconds into year/month/day/hour/minute/second units.

    Unit sizes are fixed (year = 365 days, month = 30 days). A unit appears
    in the result only when its value is non-zero; the ``'second'`` entry is
    included only for a non-zero remainder (or when the input itself is
    below one minute).

    :param seconds: seconds that will be converted
    :raises ValueError: if ``seconds`` is not an ``int``
    Example:
        >>> from phanterpwa.tools import split_seconds
        >>> split_seconds(123456789)
        {'year': 3, 'month': 11, 'day': 3, 'hour': 21, 'minute': 33, 'second': 9}
        >>> split_seconds(121)
        {'minute': 2, 'second': 1}
        >>> split_seconds(3659)
        {'hour': 1, 'second': 59}
    """
    if not isinstance(seconds, int):
        raise ValueError("The seconds must be an integer. Given: {0}".format(seconds))
    # Replaces the original 5-level duplicated if/else recursion with a
    # single divmod loop; behavior is identical.
    units = (
        ('year', 31536000),   # 365 days
        ('month', 2592000),   # 30 days
        ('day', 86400),
        ('hour', 3600),
        ('minute', 60),
    )
    result = {}
    remaining = seconds
    for name, size in units:
        if remaining >= size:
            result[name], remaining = divmod(remaining, size)
            if remaining == 0:
                # Exact multiple: the original omits a 'second': 0 entry.
                return result
    result['second'] = remaining
    return result
import base64
import json
def encode_data(data):
    """Serialize *data* to JSON and return the base64-encoded bytes.

    An assertion guards against encoded payloads of 250 KiB or more.
    """
    payload = json.dumps(data).encode('utf-8')
    encoded = base64.b64encode(payload)
    assert len(encoded) < 250 * 1024
    return encoded
import math
def getUnitCost(demand: int) -> float:
    """
    Implementation of decreasing unit cost:
    Unit cost drops as demand/production increases.

    Note: ``demand`` must be positive (passed to ``math.log``).
    """
    fixed_cost = 2.5
    log_weight = 0.75
    variable_cost = log_weight * math.log(demand)
    return fixed_cost + variable_cost
from typing import Dict
from typing import Union
def format_sum_formula(sumform: Dict[str, Union[int, float]], break_after: int = 99) -> str:
    """
    Makes html formated sum formula from dictionary.

    :param sumform: element -> count mapping (possibly with DB bookkeeping keys)
    :param break_after: insert a ``<br>`` after this many elements
    >>> format_sum_formula({'C': 12, 'H': 6, 'O': 3, 'Mn': 7})
    '<html><body>C<sub>12 </sub>H<sub>6 </sub>O<sub>3 </sub>Mn<sub>7 </sub></body></html>'
    """
    if not sumform:
        return ''
    parts = ['<html><body>']
    num = 0
    for key in sumform:
        # Skip database bookkeeping columns.
        if key == 'Id' or key == 'StructureId':
            continue
        count = sumform[key]
        # BUGFIX/idiom: compare to None with `is`, not `==`.
        if count == 0 or count is None:
            continue
        try:
            times = round(count, 1)
        except TypeError:
            # Non-numeric count — render as 1.
            times = 1
        if num > 3 and num % break_after == 0:
            parts.append("<br>")
        try:
            # Split here, because database returns 'Elem_C' for example.
            el = key.split('_')[1]
        except IndexError:
            el = key
        parts.append("{}<sub>{:g} </sub>".format(el, times))
        num += 1
    parts.append('</body></html>')
    return "".join(parts)
def get_iwp_label_key( iwp_label ):
    """
    Retrieves a key that locates the supplied IWP label within the underlying
    dataset, both temporally and spatially.

    Takes 1 argument:

      iwp_label - IWP label to locate.

    Returns 1 value:

      label_key - Tuple of (time step index, z index) for iwp_label.

    """
    time_step_index = iwp_label["time_step_index"]
    z_index = iwp_label["z_index"]
    return (time_step_index, z_index)
import yaml
def template_params(path):
    """Load a YAML template file and return its parameters as a dict."""
    with open(path, "r") as stream:
        params = yaml.safe_load(stream)
    return params
def _mnl_transform_deriv_c(*args, **kwargs):
"""
Returns None.
This is a place holder function since the MNL model has no shape
parameters.
"""
# This is a place holder function since the MNL model has no shape
# parameters.
return None | a40b35679fbefd0c1c0e289c91ea48765784f73f | 684,163 |
def _pad(slist, n, c=" "):
"""_pad(slist, n, c=' ') pads each member of string list 'slist' with
fill character 'c' to a total length of 'n' characters and returns
the concatenated results.
strings longer than n are *truncated*.
>>>
>>> _pad(["this","that","the other"],9," ")
'this that the other'
"""
if isinstance(slist, str):
if n > len(slist):
return slist + c*(n-len(slist))
else:
return slist[:n]
else:
result = []
for s in slist:
if isinstance(s, str):
if n > len(s):
t = s + c*(n-len(s))
else:
t = s[:n]
else:
t = _pad(s, n, c)
result.append(t)
return "".join(result) | 45e9ee4f981f59b85be07fb961ec5db751dcb3ee | 684,164 |
def _ensure_trailing_slash(url: str) -> str:
"""Return url guaranteed to end in a slash"""
return url if url.endswith("/") else f"{url}/" | 84cce81a52b4e2c029a6dc3cb1a9bfb0b6dc25ea | 684,165 |
def _scoped_name(name_scope, node_name):
"""Returns scoped name for a node as a string in the form '<scope>/<node name>'.
Args:
name_scope: a string representing a scope name, similar to that of tf.name_scope.
node_name: a string representing the current node name.
Returns
A string representing a scoped name.
"""
if name_scope:
return '%s/%s' % (name_scope, node_name)
return node_name | 4e3ff71cb6ac74adc57637ba6ba5def59004ec6e | 684,166 |
def process_claim(claim):
    """Convert a claim row like '#1 @ 1,3: 4x4' into (claim_number, points)."""
    number_part, details = (part.strip() for part in claim.split('@'))
    claim_number = int(number_part[1:])  # strip the leading '#'
    coordinates, area = (part.strip() for part in details.split(':'))
    column, row = (int(v) for v in coordinates.split(','))
    width, height = (int(v) for v in area.split('x'))
    points = {
        (x, y)
        for x in range(row, row + height)
        for y in range(column, column + width)
    }
    return claim_number, points
import math
def num_digits(n):
    """
    Return the number of digits (in base 10) for integer n > 0

    Uses the decimal string length rather than ``int(log10(n)) + 1`` so the
    result is exact for arbitrarily large integers (floating-point log10
    can be off by one near large powers of ten).
    """
    return len(str(n))
def get_best_fuzz(predicted_mem):
    """Retrieve the best prediction: the argmax index along axis 1."""
    return predicted_mem.argmax(axis=1)
def append_heatmap(tokens, scores, latex, gamma, caption, pad_token, formatting="colorbox", truncate_pad=True):
    """
    Produce a heatmap for LaTeX

    Appends one figure (a colored box or text per token) to *latex*.
    Format options: colorbox, text"""
    if gamma != 1:
        raise NotImplementedError
    pieces = [latex, "\n\\begin{figure}[!htb]"]
    for token, score in zip(tokens, scores):
        if truncate_pad and token == pad_token:
            continue
        # Non-negative scores are shaded red, negative ones blue.
        color = "red" if score >= 0 else "blue"
        pieces.append(f"\\{formatting}" + "{" + f"{color}!{abs(score) * 100}" + "}" + "{" + token + "}")
    pieces.append("\\caption{" + f"{caption}" + "}")
    pieces.append("\\end{figure}\n")
    return "".join(pieces)
def total_link_cost(net):
    """
    Compute the total of link costs (volume * costfunction(volume))
    over all links at current volumes on those links

    Parameters:
       net - Net object as returned by parse_net_file()

    Return value:
       total link cost
    """
    total = 0
    for link in net.links:
        total += link.volume * link.cost
    return total
def make_grid(x, y, fill: int = 0):
    """Make an x-by-y list of lists filled with ``fill``.

    Args:
        x: number of rows.
        y: number of columns in each row.
        fill: value placed in every cell.

    Returns:
        A list of ``x`` independent rows, each a list of ``y`` fills.
    """
    # The inner comprehension previously shadowed ``y`` with its own loop
    # variable (harmless but confusing); use throwaway names for both.
    return [[fill for _ in range(y)] for _ in range(x)]
def notas(*n, sit=False):
    """
    Receives one or more student grades and returns the number of grades,
    the highest and lowest grade, the average and, optionally, the
    student's situation.

    :param n: one or more grades
    :param sit: (optional) include the student's situation in the result
    :return: dictionary with the information
    """
    informa = dict()
    cont = tot = maior = menor = média = 0
    for c in range(len(n)):
        if c == 0:
            maior = menor = n[c]
        else:
            if n[c] > maior:
                maior = n[c]
            # BUGFIX: the minimum previously used `menor < n[c]`, which
            # could never lower it; compare the new grade against `menor`.
            if n[c] < menor:
                menor = n[c]
        tot += n[c]
        cont += 1
    média = tot / cont
    informa['total'] = cont
    informa['maior'] = maior
    informa['menor'] = menor
    informa['média'] = float(f'{média:.2f}')
    if sit:
        if média < 5:
            situação = 'RUÍM'
        elif média < 7:
            situação = 'RAZOÁVEL'
        else:
            situação = 'BOA'
        informa['situação'] = situação
    return informa
def i8(x):
    """truncates x to a 8-bit integer"""
    return x % 256
def get_las_version(las):
    """
    Get the LAS file format version from an in-memory lasio.LASFile object.

    There are 3 possible versions (https://www.cwls.org/products/):

    - LAS 1.2
    - LAS 2.0
    - LAS 3.0

    Args:
        las (lasio.LASFile): An in-memory lasio.LASFile object

    Returns:
        version (float): LAS format version
    """
    version_item = las.version[0]
    return float(version_item.value)
from typing import Sequence
from typing import Dict
from typing import Any
def bind_function_args(argument_names: Sequence[str], *args,
                       **kwargs) -> Dict[str, Any]:
    """Returns a dict with function arguments.

    Positional args are paired with ``argument_names`` (extras on either
    side are ignored by ``zip``); keyword args are merged in afterwards.
    """
    bound = dict(zip(argument_names, args))
    bound.update(kwargs)
    return bound
from typing import Dict
def make_character_dict() -> Dict[str, str]:
    """Create dict of {character: label} to label smiles characters.

    Labels atoms, ring-closure digits (1-99) and bond symbols.
    """
    character_dict = {}
    # NOTE: 'p' appeared twice in the original list; the duplicate was a
    # redundant dict write and has been removed (resulting dict unchanged).
    atoms = ["C", "O", "N", "S", "B", "P", "F", "I", "c", "n", "o", '*',
             'Cl', 'Br', 'p', 'b', 's']
    for atom in atoms:
        character_dict[atom] = "atom"
    for number in range(1, 100):
        character_dict[str(number)] = "cyclic"
    character_dict.update({
        "=": "double_bond",
        "(": "branch_start",
        ")": "branch_end",
        '\\': 'chiral_double_bond',
        '/': 'chiral_double_bond',
        '#': 'triple_bond',
        '$': 'quadruple_bond',
        '.': 'split',
        '-': 'single_bond',
        ':': 'aromatic_bond',
    })
    return character_dict
def list_contains_only_xs(lst):
    """Check whether the given list contains only x's"""
    return all(elem == "X" for elem in lst)
import json
def decode_json(json_string: str) -> dict:
    """
    Takes a message as a JSON string and unpacks it to get a dictionary.

    :param str json_string: A message, as a JSON string.
    :return dict: An unverified dictionary. Do not trust this data.
    """
    decoder = json.JSONDecoder()
    return decoder.decode(json_string)
def set_length_units(client, units, file_=None, convert=None):
    """Set the current length units for a model.

    This will search the model's available Unit Systems for the first one
    which contains the given length unit.

    Args:
        client (obj):
            creopyson Client.
        units (str):
            New length units.
        `file_` (str|list:str, optional):
            File name or List of file names;
            Defaults is currently active model.
        convert (bool, optional):
            Whether to convert the model's length values to the
            new units (True) or leave them the same value (False).
            Defaults is True.

    Returns:
        None
    """
    data = {"units": units}
    if file_ is None:
        # No explicit file: fall back to the currently active model.
        active_file = client.file_get_active()
        if active_file:
            data["file"] = active_file["file"]
    elif isinstance(file_, str):
        data["file"] = file_
    elif isinstance(file_, list):
        data["files"] = file_
    if convert is not None:
        data["convert"] = convert
    return client._creoson_post("file", "set_length_units", data)
import torch
import math
def log_sum_exp(a: torch.Tensor, b: torch.Tensor):
    """
    Logsumexp with safety checks for infs.
    """
    # If either operand is infinite, the other dominates.
    if torch.isinf(a):
        return b
    if torch.isinf(b):
        return a
    # Anchor on the larger value so exp() never overflows.
    hi, lo = (a, b) if a > b else (b, a)
    return math.log1p(math.exp(lo - hi)) + hi
from typing import List
from typing import Any
from typing import Union
from typing import Tuple
def isinstances(__obj: List[Any], __class_or_tuple: Union[Any, Tuple[Any]]) -> bool:
    """Return whether an every element of the list is an instance of a class or of a subclass thereof.

    A tuple, as in `isinstance(x, (A, B, ...))`, may be given as the target to check against.
    This is equivalent to `isinstance(x, A)` or `isinstance(x, B)` or ... etc.
    """
    for element in __obj:
        if not isinstance(element, __class_or_tuple):
            return False
    return True
def polynomial_power_combinations(degree):
    """
    Combinations of powers for a 2D polynomial of a given degree.

    Produces the (i, j) pairs to evaluate the polynomial with ``x**i*y**j``.

    Parameters
    ----------
    degree : int
        The degree of the 2D polynomial. Must be >= 0.

    Returns
    -------
    combinations : tuple
        A tuple with ``(i, j)`` pairs.

    Examples
    --------
    >>> print(polynomial_power_combinations(1))
    ((0, 0), (1, 0), (0, 1))
    >>> print(polynomial_power_combinations(2))
    ((0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2))
    >>> # A degree zero polynomial would be just the mean
    >>> print(polynomial_power_combinations(0))
    ((0, 0),)
    """
    if degree < 0:
        raise ValueError("Invalid polynomial degree '{}'. Must be >= 0.".format(degree))
    pairs = []
    for j in range(degree + 1):
        for i in range(degree + 1 - j):
            pairs.append((i, j))
    # Stable sort keeps the x-power-descending order within each degree.
    pairs.sort(key=sum)
    return tuple(pairs)
def from_cpp(str_msg, cls):
    """Return a ROS message from a serialized string

    Parameters
    ----------
    - str_msg: str, serialized message
    - cls: ROS message class, e.g. sensor_msgs.msg.LaserScan.
    """
    instance = cls()
    return instance.deserialize(str_msg)
def ipv4_lstrip_zeros(address):
    """
    The function to strip leading zeros in each octet of an IPv4 address.

    Args:
        address: An IPv4 address in string format.

    Returns:
        String: The modified IPv4 address string.
    """
    octets = []
    for octet in address.strip().split('.'):
        # Drop any attached CIDR suffix, then strip leading zeros.
        stripped = octet.split('/')[0].lstrip('0')
        octets.append(stripped if stripped else '0')
    return '.'.join(octets)
import re
def join_lines(src, before, after, sep=" "):
    """
    Remove the newline and indent between a pair of lines where the first
    ends with ``before`` and the second starts with ``after``, replacing
    it by the ``sep``.
    """
    # BUGFIX: the previous per-character "[c][h]..." class trick broke for
    # characters such as ']' or '^'; re.escape handles every literal.  A
    # callable replacement avoids re.sub interpreting backslashes in the
    # joined result as escape sequences.
    regex = re.escape(before) + r"\n\s*" + re.escape(after)
    replacement = before + sep + after
    return re.sub(regex, lambda match: replacement, src)
def flatten_stmts(stmts):
    """Return the full set of unique stms in a pre-assembled stmt graph.

    The flattened list of of statements returned by this function can be
    compared to the original set of unique statements to make sure no
    statements have been lost during the preassembly process.

    Parameters
    ----------
    stmts : list of :py:class:`indra.statements.Statement`
        A list of top-level statements with associated supporting statements
        resulting from building a statement hierarchy with
        :py:meth:`combine_related`.

    Returns
    -------
    stmts : list of :py:class:`indra.statements.Statement`
        List of all statements contained in the hierarchical statement graph.
    """
    collected = set(stmts)
    for stmt in stmts:
        # Recursively fold in every supporting statement.
        if stmt.supported_by:
            collected |= set(flatten_stmts(stmt.supported_by))
    return list(collected)
import math
def round_sig(number, precision=4):
    """ Round number with given number of significant numbers - precision

    Args:
        number (number): number to round
        precision (int): number of significant numbers
    """
    if number == 0.0:
        return number
    # Order of magnitude of the number decides which decimal place to keep.
    magnitude = int(math.floor(math.log10(abs(number))))
    return round(number, precision - magnitude - 1)
def pairs(k, arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/pairs/problem

    You will be given an array of integers and a target value. Determine the number of pairs of array elements that
    have a difference equal to a target value.

    Args:
        k (int): The target difference
        arr (list): list of integers

    Returns:
        int: number of pairs of integers whose difference is k
    """
    # Set membership replaces the original per-item try/except dict lookup:
    # for each element, count it when its partner (i + k) exists.
    values = set(arr)
    return sum(1 for i in arr if i + k in values)
def find(sexp, *names):
    """Return the first node in `sexp` whose name is in `names` (None if absent)."""
    matches = (child for child in sexp if child[0] in names)
    return next(matches, None)
import json
def get_workflow_metadata(metadata_json):
    """Load workflow metadata from a JSON file.

    Args:
        metadata_json (str): Path to file containing metadata json for the workflow.

    Returns:
        metadata (dict): A dict consisting of Cromwell workflow metadata information.
    """
    with open(metadata_json) as handle:
        return json.load(handle)
def create_unbroadcast_axis(shape, broadcast_shape):
    """Creates the reduction axis for unbroadcasting.

    Args:
        shape: A list. The shape after the broadcast operation.
        broadcast_shape: A list. The original shape the array being unbroadcast
            had.

    Returns:
        A list. The axes along which the array needs to be reduced. These axes will
        be distributed evenly into the original shape.
    """
    axes = []
    for i in range(len(broadcast_shape)):
        # An axis is reduced when it is missing from `shape` or was expanded.
        missing = i >= len(shape)
        if missing or broadcast_shape[-(1 + i)] > shape[-(1 + i)]:
            axes.append(-(1 + i))
    return tuple(axes)
def check_zeros(counts):
    """
    helper function to check if vector is all zero

    Note: checks the *sum*, so mixed values that cancel (e.g. [-1, 1])
    also report True — the original behaviour is preserved.

    :param counts:
    :return: bool
    """
    return sum(counts) == 0
def get_nat_orb_occ(lines):
    """
    Find the natural orbital occupations

    Scans for the 'Natural Orbital Occupation Numbers:' header and collects
    the absolute value after '=' on each following non-blank line.
    """
    occupations = []
    in_section = False
    for line in lines:
        if not in_section:
            if line == 'Natural Orbital Occupation Numbers:\n':
                in_section = True
            continue
        if not line.strip():
            # Blank line terminates the section.
            break
        occupations.append(abs(float(line.split('=')[-1].strip())))
    return occupations
def are_words_in_word_list(
    words, word_list, case_sensitive=False, get_score=False, all_must_match=True
):
    """Checks if word(s) are contained in another word list.

    The search can be performed with or without case sensitivity.
    The check words can contain wildcards, e.g. "abc*" to allow
    a wider range of matches against the word list.

    Returns a bool, or a (bool, match_count) tuple when ``get_score``.
    """
    check_words = words if isinstance(words, list) else [words]
    found = {}
    for raw_word in check_words:
        word = raw_word if case_sensitive else raw_word.lower()
        if "*" in word:
            # BUGFIX: keep everything before the '*'; the original sliced
            # to find('*') - 1, silently dropping the last prefix char.
            word = word[:word.index("*")]
        for candidate in word_list:
            candidate = candidate if case_sensitive else candidate.lower()
            if candidate.startswith(word):
                found[word] = True
        if all_must_match and len(found) == len(check_words):
            return (True, len(found)) if get_score else True
        if not all_must_match and len(found) > 0:
            return (True, len(found)) if get_score else True
    return (False, len(found)) if get_score else False
def percentiles_from_counts(counts_dt, percentiles_range=None):
    """Returns [(percentile, value)] with nearest rank percentiles.

    Percentile 0: <min_value>, 100: <max_value>.
    counts_dt: { <value>: <count> }
    percentiles_range: iterable for percentiles to calculate; 0 <= ~ <= 100

    Source: https://stackoverflow.com/questions/25070086/percentiles-from-counts-of-values
    """
    if percentiles_range is None:
        percentiles_range = [0, 1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 99, 100]
    # Empty counts (e.g. case completely absent in the annotation): every
    # requested percentile maps to 0.
    if not counts_dt:
        return [(p, 0) for p in percentiles_range]
    assert all(0 <= p <= 100 for p in percentiles_range)
    total = sum(counts_dt.values())
    sorted_counts = sorted(counts_dt.items())
    idx = 0                            # current position in sorted_counts
    cumulative = sorted_counts[0][1]   # frequency mass up to idx
    result = []
    for p in sorted(percentiles_range):
        if p >= 100:
            result.append((p, sorted_counts[-1][0]))
            continue
        rank = p / 100.0 * total
        # Advance until the cumulative count exceeds the target rank.
        while cumulative <= rank and idx < len(sorted_counts):
            idx += 1
            cumulative += sorted_counts[idx][1]
        result.append((p, sorted_counts[idx][0]))
    return result
def epsi_vapor_bot(Fr_bot):
    """
    Calculates the vapor content of bubble layer at the bottom of column

    Parameters
    ----------
    Fr_bot : float
        The Frudo criterion at the bottom of column, [dimensionless]

    Returns
    -------
    epsi_vapor_bot : float
        The vapor content of bubble layer at the bottom of column, [dimensionless]

    References
    ----------
    Дытнерский, страница 207, формула 5.47
    """
    sqrt_fr = Fr_bot**0.5
    return sqrt_fr / (1 + sqrt_fr)
def flatten_double_list(double_list):
    """
    flatten a double list into a single list.
    """
    flat = []
    for inner in double_list:
        flat.extend(inner)
    return flat
def slice_list_to_chunks(lst, n):
    """
    Slice a list into chunks of size n (last chunk may be shorter).

    Args:
        list
        int

    Returns:
        [list]
    """
    return [lst[start:start + n] for start in range(0, len(lst), n)]
def get_age_bracket(school_type):
    """Return the age structure (list of ages) for different school types.

    Raises KeyError for an unknown school type.
    """
    primary_ages = [6, 7, 8, 9]
    lower_ages = [10, 11, 12, 13]
    upper_ages = [14, 15, 16, 17]
    age_brackets = {
        'primary': primary_ages,
        'primary_dc': list(primary_ages),
        'lower_secondary': lower_ages,
        'lower_secondary_dc': list(lower_ages),
        'upper_secondary': upper_ages,
        'secondary': lower_ages + upper_ages,
        'secondary_dc': lower_ages + upper_ages,
    }
    return age_brackets[school_type]
def seq_to_arch(seq, num_nodes):
    """
    Translates given sequential representation of an architecture sampled by
    the controller to an architecture

    Arguments:
        seq: sequential representation of architecture (elements expose .item())
        num_nodes: number of nodes in cell including the two input nodes

    Returns:
        a list of 4-tuples (n0, op0, n1, op1), where n0 and n1 are two previous
        nodes and op0 and op1 are operations to be applied to n0 and n1,
        respectively.
        e.g. [(0, 1, 0, 1), (0, 1, 0, 1), (0, 1, 0, 1), (0, 1, 0, 1), (0, 4, 0, 4)]
    """
    # (num_nodes - 1) + 1 collapses to num_nodes: op entries are shifted by it.
    op_offset = num_nodes
    arch = []
    for i in range(0, len(seq), 4):
        n0 = seq[i].item() - 1
        op0 = seq[i + 1].item() - op_offset
        n1 = seq[i + 2].item() - 1
        op1 = seq[i + 3].item() - op_offset
        arch.append((n0, op0, n1, op1))
    return arch
def force_tuple(x):
    """Make tuple out of `x` if not already a tuple or `x` is None"""
    if x is None or isinstance(x, tuple):
        return x
    return (x,)
from typing import List
from typing import Dict
def _build_record_dict(record_strings: List[str]) -> Dict:
"""
Parse the pbn line by line. When a block section like "Auction" or "Play" is encountered, collect all the content of
the block into a single entry
:param record_strings: List of string lines for a single board
:return: A dictionary mapping keys from the pbn to strings or other useful values (e.g. list of strings for the
bidding record)
"""
record_dict = {}
# Janky while loop to handle non bracketed lines
i = 0
while i < len(record_strings):
record_string = record_strings[i]
if not (record_string.startswith("[") or record_string.startswith("{")):
i += 1
continue
if record_string.startswith("{"):
commentary = ""
while i < len(record_strings):
record_string = record_strings[i]
if record_string.startswith("["):
break
commentary += record_string + " "
i += 1
record_dict["Commentary"] = commentary.strip()
continue
if record_string.startswith("[") and "]" not in record_string:
while "]" not in record_string:
i += 1
record_string = record_string + record_strings[i]
record_string = record_string.replace("[", "").replace("]", "")
key, value = record_string.split(maxsplit=1)
value = value.replace('"', "")
if key == "Note":
number, message = value.split(":", maxsplit=1)
key = key + "_" + number
value = message
record_dict[key] = value
if key == "Auction":
auction_record = []
i += 1
while i < len(record_strings):
auction_str = record_strings[i]
if "[" in auction_str:
break
auction_record.extend(auction_str.split())
i += 1
record_dict["bidding_record"] = auction_record
elif key == "Play":
play_record = []
i += 1
while i < len(record_strings):
play_str = record_strings[i]
if "[" in play_str or play_str == "*":
break
play_record.append(play_str.split())
i += 1
record_dict["play_record"] = play_record
else:
i += 1
return record_dict | 173a95d485dae9d6bfcc77f5d73c9ee3305e735a | 684,269 |
def ts_to_vtt(timestamp):
    """
    ts_to_vtt converts a timestamp in seconds into a WebVTT cue time
    of the form "HH:MM:SS.mmm".
    """
    hours, seconds = divmod(timestamp, 3600)
    mins, seconds = divmod(seconds, 60)
    seconds = round(seconds, 3)
    # BUGFIX: '{seconds:02}' does not zero-pad floats (9.5 -> '9.5'); WebVTT
    # requires zero-padded seconds with three decimals.  The stray leading
    # space before the hours field is dropped as well.
    return f"{int(hours):02}:{int(mins):02}:{seconds:06.3f}"
def _node_label(node):
"""Generate a node label in the format of "support:name" if both exist,
or "support" or "name" if either exists.
Parameters
----------
skbio.TreeNode
node containing support value or name
Returns
-------
str
Generated node label
"""
lblst = []
if node.support is not None: # prevents support of NoneType
lblst.append(str(node.support))
if node.name: # prevents name of NoneType
lblst.append(node.name)
return ':'.join(lblst) | fee8956d87dbb086a4723012822f93b45d4aac6c | 684,276 |
def fetch_tensor(name, ws):
    """Return the value of the named tensor from workspace *ws* as numpy."""
    tensor = ws.get_tensor(name)
    return tensor.ToNumpy()
def torch2numpy(img):
    """
    Converts a torch image to numpy format (channels moved last).
    """
    ndim = img.dim()
    if ndim == 4:
        # NCHW -> NHWC
        img = img.permute(0, 2, 3, 1).contiguous()
    elif ndim == 3:
        # CHW -> HWC
        img = img.permute(1, 2, 0).contiguous()
    return img.cpu().numpy()
from typing import Dict
import re
def str_replace(s: str, replacements: Dict[str, str]) -> str:
    """Replaces keys in replacements dict with their values, case-insensitively."""
    for word, replacement in replacements.items():
        # BUGFIX: the old `if word in s` pre-check was case-sensitive and
        # skipped matches the case-insensitive re.sub below would have made.
        s = re.sub(re.escape(word), replacement, s, flags=re.IGNORECASE)
    return s
def get_bits(num, gen):
    """ Get "num" bits from gen

    Packs the bits into an int, first bit most significant.  Returns []
    when the generator yields the [] sentinel.
    """
    out = 0
    for _ in range(num):
        out <<= 1
        # BUGFIX: `gen.next()` is Python 2 only; use the builtin next().
        val = next(gen)
        if val == []:
            return []
        out += val & 0x01
    return out
def GetMangledParam(datatype):
    """Returns a mangled identifier for the datatype."""
    if len(datatype) <= 2:
        return datatype.replace('[', 'A')
    chars = []
    for i in range(1, len(datatype)):
        c = datatype[i]
        prev = datatype[i - 1]
        if c == '[':
            # Array markers become 'A'.
            chars.append('A')
        elif c.isupper() or prev in ('/', 'L'):
            chars.append(c.upper())
    return ''.join(chars)
def get_feature_class(base_name, workspace):
    """
    Creates a valid ESRI reference to a shapefile or geodatabase feature class

    base_name: the name of the feature class, without any extension
    workspace: the name of the folder (for shapefiles) or geodatabase (for gdb feature classes)
    return: a text string referring to the location of the feature class
    """
    if workspace.endswith('.gdb'):
        return workspace + '\\' + base_name
    return workspace + '\\' + base_name + '.shp'
import signal
def signal_to_human(value):
    """signal_to_human() -- provide signal name based on subprocess return code

    Args:
        value (int) - Popen.returncode

    Returns:
        Signal name if it exists, otherwise the original value provided
    """
    negated_signals = {}
    for name in dir(signal):
        if name.startswith("SIG"):
            # Popen encodes "killed by signal N" as returncode -N.
            negated_signals[-getattr(signal, name)] = name
    if value < 0 and value in negated_signals:
        return negated_signals[value]
    return value
def interpret_bintime(bintime):
    """If bin time is negative, interpret as power of two.

    Examples
    --------
    >>> interpret_bintime(2)
    2
    >>> interpret_bintime(-2) == 0.25
    True
    >>> interpret_bintime(0)
    Traceback (most recent call last):
        ...
    ValueError: Bin time cannot be = 0
    """
    if bintime == 0:
        raise ValueError("Bin time cannot be = 0")
    return 2 ** bintime if bintime < 0 else bintime
def get_kernel(x, kernel):
    """
    Calculates the kernel size given the input. Kernel size is changed only if the input dimentions are smaller
    than kernel.

    Note: as in the original, at most one dimension is clamped per call
    (height takes precedence over width).

    Args:
        x: The input vector.
        kernel: The height and width of the convolution kernel filter

    Returns:
        The height and width of new convolution kernel filter
    """
    height, width = kernel[0], kernel[1]
    input_shape = x.get_shape().as_list()
    if input_shape[1] < height:
        height = input_shape[1]
    elif input_shape[2] < width:
        width = input_shape[2]
    return (height, width)
def pot_LJ_dl(r):
    """Dimensionless Lennard-Jones potential as a function of distance r."""
    inv_r6 = r**-6
    return 4 * (inv_r6**2 - inv_r6)
def fit_model(model, callbacks_list, sequence, outseq, n_epoch):
    """
    Train the RAENN model.

    Parameters
    ----------
    model : keras.models.Model
        RAENN model to be trained
    callbacks_list : list
        List of keras callbacks
    sequence : numpy.ndarray
        Array LC flux times, values and errors
    outseq : numpy.ndarray
        An array of LC flux values and limiting magnitudes
    n_epoch : int
        Number of epochs to train for

    Returns
    -------
    model : keras.models.Model
        Trained keras model
    """
    model.fit(
        [sequence, outseq],
        sequence,
        epochs=n_epoch,
        verbose=1,
        shuffle=False,
        callbacks=callbacks_list,
        validation_split=0.33,
    )
    return model
def get_named_object(pathspec):
    """Return a named object from a module given its dotted path."""
    module_path, _, obj_name = pathspec.rpartition('.')
    module = __import__(module_path, fromlist=obj_name)
    return getattr(module, obj_name)
def get_formatted_emg(emg_row):
    """
    :param emg_row: dict [str] one row that represent data from Electromyograph sensor
        example:
            ['2018-07-04T17:39:53.743240', 'emg', '-1', '-6', '-9', '-9', '1', '1', '-1', '-2', '2018-07-04T17:39:53.742082']
    :return:
        formatted emg row
        example:
            ['2018-07-04T17:39:53.743240', '-1', '-6', '-9', '-9', '1', '1', '-1', '-2']
    """
    formatted = list(emg_row)
    del formatted[1]   # drop the 'emg' sensor tag
    del formatted[9]   # drop the trailing timestamp
    return formatted
from typing import OrderedDict
def get_companies(dbconnection, args):
    """
    Return an ordered dictionary of information from the company table.

    :param dbconnection: open DB-API connection exposing a ``Companies``
        table whose first two columns are (key, name)
    :param args: unused; kept for interface compatibility with callers
    :return: OrderedDict mapping db key -> company name, in row order
    """
    from collections import OrderedDict  # typing.OrderedDict is meant for annotations

    print('Companies')
    cursor = dbconnection.cursor()
    cursor.execute('SELECT * FROM Companies')
    company_dict = OrderedDict()
    # Iterate every row. The previous version called fetchone() twice
    # before the loop, silently discarding the first company.
    for row in cursor.fetchall():
        company_dict[row[0]] = row[1]
    # cursor intentionally left open, matching prior behavior
    return company_dict
def code() -> str:
    """
    Example G-code module: a drawing of a crocodile.

    The program uses relative coordinates (G91) in the XY plane (G17) and
    is built from rapid moves (G0) and clockwise/counter-clockwise arcs
    (G2/G3) with I/J center offsets.

    Please simulate first, before milling.

    :return: the G-code program as a multi-line string
    """
    return """
G91
G17
G3 X20 Y0 I10 J0
G0 X40 Y0
G3 X6 Y0 I3 J0
G0 X0 Y3
G3 X-3 Y3 I-3 J0
G0 X-40 Y0
G2 X0 Y6 I0 J3
G0 X40 Y0
G3 X0 Y6 I0 J3
G0 X-40 Y5
G0 X-10 Y10
"""
import re
def remove_none_alphanumeric(string):
    """
    Remove all non-alphanumeric characters (including underscores).

    Uses the Unicode definition of "word" characters, so letters and
    digits from any script are preserved.

    :param string: input text
    :return: ``string`` with every non-alphanumeric run deleted
    """
    # Raw string: the old pattern "[\W_]+" relied on Python leaving the
    # unknown \W escape alone, which is a SyntaxWarning in modern Python.
    # re.sub caches compiled patterns, so no manual compile is needed.
    return re.sub(r"[\W_]+", "", string, flags=re.UNICODE)
def map_values(map_fn, dictionary):
    """Return a new dict with every value transformed by ``map_fn``.

    Keys are preserved unchanged; the input dictionary is not modified.
    """
    transformed = {}
    for key, value in dictionary.items():
        transformed[key] = map_fn(value)
    return transformed
def create_poll(poll_options: list) -> dict:
    """
    Build a poll mapping from a list of option strings.

    Each option has its surrounding spaces trimmed and starts at zero votes.

    :param poll_options: raw option strings
    :return: dict of trimmed option -> 0
    """
    return {option.strip(" "): 0 for option in poll_options}
import re
def loads(hesciiStr, prefix='x'):
    """
    Decode a hescii-encoded string back to text.

    Runs of ``prefix`` followed by pairs of hex digits are replaced by the
    characters those bytes encode (hex payloads are decoded as UTF-8);
    everything else is passed through unchanged.

    Args:
        hesciiStr: a hescii-encoded string
        prefix: string used to prefix all encoded hex values. Default 'x'
    Returns:
        The decoded string
    """
    def repl(match):
        hex_digits = match.group()[len(prefix):]
        # bytes.fromhex replaces the Python-2-only str.decode('hex'),
        # which raised AttributeError/LookupError on Python 3
        return bytes.fromhex(hex_digits).decode('utf-8')

    # re.escape guards against prefixes containing regex metacharacters
    pattern = re.escape(prefix) + r'(?:[0-9a-fA-F]{2})+'
    return re.sub(pattern, repl, hesciiStr)
def _make_package(x):
"""Get the package name and drop the last '.' """
package = ''
for p in x:
package += p[0] + p[1]
if package and package[-1] == '.':
package = package[:-1]
return package | 1455cdb7bf508ca5fb93046aba777ded9b55c81b | 684,328 |
def _conv(n):
"""Convert a node name to a valid dot name, which can't contain the leading space"""
if n.startswith(' '):
return 't_' + n[1:]
else:
return 'n_' + n | 291f075bc0d653949c9e35308e4ecb5c14e156e3 | 684,329 |
def val_or_none_key(getter_fcn):
    """Wrap ``getter_fcn`` into a sort key tolerant of None values.

    The returned key function yields ``(0, None)`` when the getter returns
    None and ``(1, value)`` otherwise, so None sorts before everything
    without comparing None against real values.
    """
    def key(item):
        value = getter_fcn(item)
        if value is None:
            return 0, None
        return 1, value
    return key
def strip_diagnostics(tracks):
    """Return a copy of the tracks DataFrame without diagnostic columns.

    Every column whose name starts with "diag_" is dropped; the input
    DataFrame is left unchanged.
    """
    keep = [col for col in tracks.columns if not col.startswith('diag_')]
    return tracks.reindex(columns=keep)
def make_token(name, value=''):
    """Build a token dict from a name and an optional value (default '')."""
    return dict(name=name, value=value)
def convert_frac(ratio):
    """Convert a ratio string into a float, e.g. '1.0/2.0' -> 0.5.

    Plain numbers are parsed directly; strings containing '/' are split
    into numerator and denominator and divided.
    """
    try:
        return float(ratio)
    except ValueError:
        numerator, _, denominator = ratio.partition('/')
        return float(numerator) / float(denominator)
def k8s_url(namespace, kind, name=None):
    """
    Construct a URL referring to a set of kubernetes resources.

    Only supports the subset of URLs needed by kubespawner:

    - all resources of a given kind in a namespace (``name`` omitted)
    - one resource of a given kind with a specific ``name``
    """
    base = f'/api/v1/namespaces/{namespace}/{kind}'
    if name is None:
        return base
    return base + '/' + name
def _pad_vocabulary(vocab, math):
"""
Pads vocabulary to a multiple of 'pad' tokens.
Args:
vocab (list): list with vocabulary
math (str): Math precision. either `fp_16`, `manual_fp16` or `fp32`
Returns:
list: padded vocabulary
"""
if math == "fp16":
pad = 8
elif math == "fp32":
pad = 1
else:
raise NotImplementedError()
vocab_size = len(vocab)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f"madeupword{i:04d}"
vocab.append(token)
assert len(vocab) % pad == 0
return vocab | b48fc59dea22e5355811dd3c00184c11532a2c87 | 684,337 |
def convert_r_groups_to_tuples(r_groups):
    """Convert R-Group model objects into their tuple representations.

    Each element must expose a ``convert_to_tuple()`` method; results are
    returned in the original order.
    """
    return [group.convert_to_tuple() for group in r_groups]
import re
def domain(url):
    """Get domain from url.

    The URL must start with ``http://``, ``https://`` or ``/``.

    Parameters
    ----------
    url: str
        URL to parse to extract the domain.

    Raises
    ------
    ValueError
        If the URL pattern is invalid.

    Examples
    --------
    >>> domain('https://example.com/test/page.html')
    'https://example.com/'
    >>> domain('http://example.com/test/page.html')
    'http://example.com/'
    >>> domain('/example.com/test/page.html')
    '/example.com/'
    """
    matched = re.match(r'^(https?:/)?(/[a-z0-9][a-z0-9-_.]*/)', url)
    if not matched:
        raise ValueError(f'Invalid URL pattern: `{url}`.')
    scheme, host = matched.groups()
    if scheme is None:
        return host
    return scheme + host
def scale(x, s):
    """Return ``x`` scaled by the factor ``s``.

    Parameters
    ----------
    x : float
    s : float

    Returns
    -------
    x : float
        The product ``x * s``.
    """
    return x * s
def pop_sort(pop):
    """
    Sort the population in place by ascending fitness and return it.

    pop: list of individuals, each exposing a numeric ``fitness``
        attribute. The list itself is sorted and also returned.

    The previous hand-rolled selection sort was O(n^2); ``list.sort`` is
    O(n log n) and stable (ties keep their original relative order).
    """
    pop.sort(key=lambda individual: individual.fitness)
    return pop
def subverParseClient(s):
    """Return the client name from a subversion string like '/Satoshi:0.8.1/'.

    Skips the leading '/' and takes everything before the first ':'.
    """
    return s[1:].partition(":")[0]
import requests
def get_all_service_groups(asa):
    """
    Retrieve all network service groups from an ASA device via its REST API.

    :param asa: ASA device object exposing ``url()`` (base API URL) and a
        ``token`` attribute holding a previously obtained auth token.
    :return: list of service-group objects (the ``items`` array of the
        JSON response).
    """
    # REST endpoint for network service groups
    url = asa.url() + "/api/objects/networkservicegroups"
    headers = {
        'Content-Type': 'application/json',
        'User-agent': 'REST API Agent',
        'X-Auth-Token': asa.token  # session token from prior authentication
    }
    # NOTE(review): verify=False disables TLS certificate validation --
    # acceptable only for lab devices with self-signed certs; confirm intent.
    # Will raise KeyError if the response carries no 'items' (e.g. an error body).
    response = requests.request("GET", url, headers=headers, verify=False).json()['items']
    return response
def get_indexes(lst, sub_lst, compare_function=None):
    """Return the start indexes at which ``sub_lst`` occurs in ``lst``.

    :param lst: the list to search in
    :param sub_lst: the list to match
    :param compare_function: optional comparison taking two lists; when
        omitted, plain ``==`` equality of the window and ``sub_lst`` is used
    :type lst: list
    :type sub_lst: list
    :type compare_function: function
    :return: all matching start indexes, in increasing order
    :rtype: list of int
    """
    window = len(sub_lst)
    matches = []
    for start in range(len(lst)):
        candidate = lst[start:start + window]
        if compare_function:
            if compare_function(candidate, sub_lst):
                matches.append(start)
        elif candidate == sub_lst:
            matches.append(start)
    return matches
def _join_modules(module1, module2):
"""Concatenate 2 module components.
Args:
module1: First module to join.
module2: Second module to join.
Returns:
Given two modules aaa.bbb and ccc.ddd, returns a joined
module aaa.bbb.ccc.ddd.
"""
if not module1:
return module2
if not module2:
return module1
return '%s.%s' % (module1, module2) | d528261fbe8fda829b59509612f3cee652024cbc | 684,355 |
def from_723(u: bytes) -> int:
    """Convert an ISO 9660 7.2.3 (both-byte-order) field to uint16_t.

    Always reads the little-endian half, so images that violate the spec
    still decode correctly.
    """
    low, high = u[0], u[1]
    return (high << 8) | low
import base64
def decode_textfield_base64(content):
    """
    Decode the contents of a CIF textfield from Base64.

    :param content: Base64-encoded string
    :return: the decoded bytes
    """
    return base64.b64decode(content)
from typing import Any
def default_key(obj: object, key: str, default: Any = None) -> Any:
    """Look up ``key`` on ``obj``, returning ``default`` when not found.

    Supports dicts (key lookup), lists/tuples (index lookup) and arbitrary
    objects (attribute lookup). A falsy/None ``obj`` yields ``default``.

    Args:
        obj (object, list, tuple, dict): container or object to read from
        key (str, int): dict key, sequence index or attribute name
        default (object): value returned when the lookup fails

    Returns:
        value for the given key, or ``default``
    """
    if not obj:
        return default
    if isinstance(obj, dict):
        return obj.get(key, default)
    if isinstance(obj, (list, tuple)):
        # Previously sequences fell through to getattr(), which raised
        # TypeError for integer indices instead of honouring the docstring.
        try:
            return obj[key]
        except (IndexError, TypeError):
            return default
    return getattr(obj, key, default)
def _get_lua_base_module_parts(base_path, base_module):
    """
    Get a base module from either provided data, or from the base path of the package

    Args:
        base_path: The package path (slash-separated, e.g. "foo/bar")
        base_module: None, or a string representing the absence/presence of a base
                     module override ("" disables the base module entirely)

    Returns:
        Returns a list of parts of a base module based on base_path/base_module.
        If base_module is None, a default one is created based on package name.
    """
    # NOTE: written in Starlark/Buck style (`== None`, no `is`); keep as-is.
    # If base module is unset, prepare a default rooted at "fbcode".
    if base_module == None:
        return ["fbcode"] + base_path.split("/")

    # If base module is empty, return the empty list.
    elif not base_module:
        return []

    # Otherwise, split it on the module separator.
    else:
        return base_module.split(".")
def broken_bond_keys(tra):
    """Return the keys of bonds broken by the transformation.

    ``tra`` is a 3-tuple whose final element holds the broken-bond keys.
    """
    _first, _second, brk_keys = tra
    return brk_keys
def named_field(key, regex, vim=False):
    """
    Wrap ``regex`` in a capture group, named when ``key`` is given.

    With ``key=None`` a plain (numbered) group is produced regardless of
    ``vim``; otherwise ``vim=True`` yields Vim-style escaped parentheses
    and ``vim=False`` a Python named group.

    References:
        https://docs.python.org/2/library/re.html#regular-expression-syntax
    """
    if key is None:
        return f'({regex})'
    if vim:
        return rf'\({regex}\)'
    return f'(?P<{key}>{regex})'
import torch
def _set_finite_diff_coeffs(ndim, dx, device, dtype):
"""Calculates coefficients for finite difference derivatives.
Currently only supports 4th order accurate derivatives.
Args:
ndim: Int specifying number of dimensions (1, 2, or 3)
dx: Float Tensor containing cell spacing in each dimension
device: PyTorch device to create coefficient Tensors on
dtype: PyTorch datatype to use
Returns:
Float Tensors containing the coefficients for 1st and 2nd
derivatives.
fd1: Contains 2 coefficients for each dimension
fd2: Contains 1 coefficient for the central element, followed by
2 coefficients for each dimension
"""
fd1 = torch.zeros(ndim, 2, device=device, dtype=dtype)
fd2 = torch.zeros(ndim * 2 + 1, device=device, dtype=dtype)
dx = dx.to(device).to(dtype)
for dim in range(ndim):
fd1[dim] = (
torch.tensor([8 / 12, -1 / 12], device=device, dtype=dtype)
/ dx[dim]
)
fd2[0] += -5 / 2 / dx[dim] ** 2
fd2[1 + dim * 2 : 1 + (dim + 1) * 2] = (
torch.tensor([4 / 3, -1 / 12], device=device, dtype=dtype)
/ dx[dim] ** 2
)
return fd1, fd2 | ef270d3a7538c54956410d86138f1b7d4c219428 | 684,371 |
def build_regex(pattern, pattern_name=None, **kwargs):
    """
    Format ``pattern`` with ``kwargs`` and optionally wrap it in a named
    capture group.

    See: https://tonysyu.github.io/readable-regular-expressions-in-python.html
    """
    expanded = pattern.format(**kwargs)
    if pattern_name is None:
        return expanded
    return f'(?P<{pattern_name}>{expanded})'
def number_of_patients(dataset, feature_files, label_files):
    """
    Calculate the number of unique patients in the given filename lists.

    The first 4 characters of each feature filename identify the patient.
    ``label_files`` is only used to validate that both lists have the
    same length.

    :param dataset: string. Dataset train/val/test (used in log output).
    :param feature_files: list of strings. Feature filenames, patient-prefixed.
    :param label_files: list of strings. Label filenames.
    :return: int. Number of unique patients.
    :raises AssertionError: if the two lists differ in length.
    """
    if len(feature_files) != len(label_files):
        raise AssertionError(dataset, 'files have different length.')
    print('Number of', dataset, 'files:', len(feature_files))
    # dict.fromkeys dedupes in O(n) while keeping first-seen order
    # (the previous list-membership scan was O(n^2))
    patients = list(dict.fromkeys(file[:4] for file in feature_files))
    print(dataset, 'patients:', patients)
    num_patients = len(patients)
    print('Num', dataset, 'patients:', num_patients)
    return num_patients
def invert_dict(dic, sort=True, keymap={}, valmap={}):
    """Invert a dict of lists.

    Turns::

        key1 : [val1, val2]
        key2 : [val1]

    into::

        val1 : [key1, key2]
        val2 : [key1]

    Parameters
    -----------
    dic : dict
        Mapping of key -> list of values.
    sort : bool
        When True, each resulting key list is sorted in place.
    keymap, valmap : dict
        Optional renaming applied to keys/values before inversion.

    Returns
    -----------
    dict
    """
    inverted = {}
    for key, values in dic.items():
        mapped_key = keymap.get(key, key)
        for value in values:
            inverted.setdefault(valmap.get(value, value), []).append(mapped_key)
    if sort:
        for key_list in inverted.values():
            key_list.sort()
    return inverted
def byte_ord(b):
    """
    Return the integer value of a byte or single-character string.

    Python 3 indexing of ``bytes`` already yields ints; such values (and
    anything else ``ord`` rejects) are passed through unchanged.
    """
    try:
        value = ord(b)
    except TypeError:
        value = b
    return value
import re
def parse_time(time_string) -> int:
    """Parse a time stamp in seconds (default) or milliseconds (with "ms" unit).

    The "s" unit is optional and implied if left out.

    Args:
        time_string(str): timestamp, e.g., "0.23s", "5.234" (implied s), "1234 ms";
            must be a number followed by "s", "ms" or nothing.

    Returns:
        int: time represented by time_string in milliseconds (fractions
        of a millisecond are truncated)

    Raises:
        ValueError: if time_string is not a valid time expression
    """
    time_pattern = re.compile(
        r"""
        \s*          # ignore leading spaces
        ([0-9.]+)    # numerical part
        \s*          # optional spaces
        (
            (s|ms)   # optional units: s (seconds) or ms (milliseconds)
            \s*      # ignore trailing spaces
        )?
        """,
        re.VERBOSE,
    )
    match = time_pattern.fullmatch(time_string)
    if not match:
        raise ValueError(
            f'cannot convert "{time_string}" to a time in seconds or milliseconds'
        )
    # float() first so fractional values like "0.5ms" don't crash:
    # previously int("0.5") raised a bare ValueError for ms inputs.
    if match[3] == "ms":
        return int(float(match[1]))
    return int(1000 * float(match[1]))
def confidence_intervals_overlap(old_score, old_ci, new_score, new_ci):
    """Return True if the confidence intervals of the two scores overlap.

    Each interval is ``score +/- ci``; overlap is tested by comparing the
    facing interval edges.
    """
    if old_score < new_score:
        return old_score + old_ci >= new_score - new_ci
    return old_score - old_ci <= new_score + new_ci
def data2str(data):
    """
    Convert space-separated hex byte values to a string.

    An empty or None value is returned unchanged (helpful for testing), e.g.:
    '57 75 6e 64 65 72 62 61 72 49 52' -> 'WunderbarIR'
    '' -> ''
    """
    if not data:
        return data
    return ''.join(chr(int(token, 16)) for token in data.split())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.