content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def ints(int_list):
    """Coerce an iterable of integer-like strings into a list of ints."""
    return list(map(int, int_list))
def regrid_get_section_shape(src, axis_sizes, axis_indices):
    """Compute the shape of each regridded section.

    :Parameters:

        src: `Field`
            The source field; only its ``ndim`` attribute is read here.

        axis_sizes: sequence
            Sizes of each axis along which a section is taken.

        axis_indices: sequence
            Axis index of each such axis; same length as *axis_sizes*.

    :Returns:

        `list`
            Integers defining the shape of each section (1 on every
            axis that is not sectioned).
    """
    section_shape = [1] * src.ndim
    for position, target_axis in enumerate(axis_indices):
        section_shape[target_axis] = axis_sizes[position]
    return section_shape
def pad_bytes32(instr):
    """Encode *instr* and right-pad it with NUL bytes to a bytes32 value."""
    encoded = instr.encode()
    padding = b'\x00' * (32 - len(encoded))
    return encoded + padding
def echo0(_, *ignored_):
    """Return the first positional argument unchanged; extras are discarded."""
    return _
def _set_default(d, k, default):
"""Same behavior as dict.setdefault"""
if k in d:
return d[k]
else:
d[k] = default
return default | 9a86246d35a94042e06100e58085f3afa353a472 | 693,034 |
import re
def keyword_split(keywords):
    """Split a keyword string into its individual keywords.

    Tokens wrapped in single or double quotes stay together, with the
    surrounding quotes removed; quotes nested inside a quoted token are
    preserved as-is.
    """
    token_pattern = re.compile(r'"([^"]+)"|\'([^\']+)\'|(\S+)')
    return [dq or sq or bare for dq, sq, bare in token_pattern.findall(keywords)]
def lsm_loop_cond(exercise_index, cashflow):
    """Countdown-loop condition: continue while the exercise index is positive.

    *cashflow* is accepted only for signature compatibility and is unused.
    """
    del cashflow
    return exercise_index > 0
import yaml
def ceph_repository_type_cdn(ansible_dir, installer_node):
    """
    Fetch container image information from all.yml.sample.

    Args:
        ansible_dir: ansible directory on the installer node
        installer_node: installer node from which all.yml.sample is read

    Returns:
        tuple: (docker_registry, docker_image, docker_image_tag)
    """
    sample_path = "{ansible_dir}/group_vars/all.yml.sample".format(
        ansible_dir=ansible_dir
    )
    out, err = installer_node.exec_command(sudo=True, cmd="cat " + sample_path)
    sample_conf = yaml.safe_load(out)
    return (
        sample_conf.get("ceph_docker_registry"),
        sample_conf.get("ceph_docker_image"),
        sample_conf.get("ceph_docker_image_tag"),
    )
def _create_args(objects, mail_data):
"""Create args to call bulk update/create"""
if not objects:
return {}
arg_list = {}
arg_list["objects"] = [
{"type": obj[0], "id": int(obj[1])} for obj in objects
]
arg_list["mail_data"] = mail_data
return arg_list | 7d0e56961139b1848614ed4079ccbb3ed429d234 | 693,046 |
def _row_to_dict(cursor, row):
"""Produce a dict from a database row"""
return {col[0]: row[idx] for idx, col in enumerate(cursor.description)} | 1b6b554ec7d7731d2f9c74131d6cbb4c54cf46bd | 693,047 |
def original_id(individualized_id):
    """
    Recover the original id of an ingredient transformed by
    individualize_ingredients() by stripping '*' markers from both ends.

    Args:
        individualized_id (str):
    Returns:
        str:
    Examples:
        >>> original_id('en:water**')
        'en:water'
        >>> original_id('en:sugar')
        'en:sugar'
    """
    return individualized_id.rstrip('*').lstrip('*')
def _removeInvalidChars(line):
"""Return a copy of line with each ASCII control character (0-31),
and each double quote, removed."""
output = ''
for c in line:
if c >= ' ' and c != '"':
output = output + c
return output | 5a3f0014ec29df0dce12287ab13ba23a8b31f496 | 693,049 |
def convert_back_to_year(val):
    """
    Map *val* in [0, 1] linearly onto a year between 1922 and 2011.
    """
    assert 0 <= val <= 1
    span = 2011. - 1922.
    return 1922. + span * val
import time
def datetime_to_ms(dt):
    """
    Convert an unaware datetime object to milliseconds. This will
    be a UTC time. The SMC stores all times in UTC and will do the
    time conversions based on the local timezone.

    Example of converting a datetime to milliseconds::

        utc_time = datetime.strptime("2018-06-04T00:00:00", "%Y-%m-%dT%H:%M:%S")
        datetime_to_ms(utc_time)

    :param dt datetime: pass in python datetime object.
    :return: value representing the datetime in milliseconds
    :rtype: int
    """
    # NOTE(review): time.mktime interprets the struct_time in the *local*
    # timezone, so the result is only a true UTC epoch when the process
    # runs with TZ=UTC; calendar.timegm would be timezone-independent.
    # Confirm against callers before changing this behavior.
    return int(time.mktime(dt.timetuple()) * 1000)
def _to_list(val):
"""Return the variable converted to list type."""
if isinstance(val, list):
return val
else:
return [val] | 4fddae97a267fd879182cec75b1fdb553a892857 | 693,056 |
def dscp_to_tos(dscp):
    """Convert a DSCP value to the corresponding TOS byte.

    The TOS field carries the 6-bit DSCP in its upper bits, so the
    conversion is a left shift by two.  The original implementation
    round-tripped the product through ``bin()``/``int()``, which is an
    identity operation and has been removed.
    """
    return dscp << 2
import re
def repl(lookup, string):
    """Replace %key% placeholders in *string* using *lookup*.

    Args:
        lookup: dict in which to look up keywords.
        string: string with embedded keywords, e.g. ``%key%``.

    Returns:
        The string with every known keyword substituted; unknown
        keywords are left untouched (percent signs included).
    """
    def _substitute(match):
        keyword = match.group(1)
        # Re-wrap unknown keywords so the placeholder survives intact.
        return lookup.get(keyword, "%{0}%".format(keyword))

    # Raw string: the original pattern was a plain literal, relying on
    # Python not treating \w as an escape sequence.
    return re.sub(r"%([\w_]+)%", _substitute, string)
def sort_files(files):
    """Return *files* sorted with duplicate ids removed.

    Parameters
    ----------
    files : list of :py:class:`bob.db.base.File`
        Files (or similar structures defining an ``id`` member) to be
        uniquified and sorted via their natural sort order.

    Returns
    -------
    sorted : list of :py:class:`bob.db.base.File`
        The sorted files, keeping only the first of each run of equal ids.
    """
    ordered = sorted(files)
    unique = []
    for candidate in ordered:
        if not unique or unique[-1].id != candidate.id:
            unique.append(candidate)
    return unique
from typing import Union
from pathlib import Path
import re
def _alter_spark_sql(sql: str, hadoop_local: Union[str, Path]) -> str:
"""Handle special paths in SQL code so that it can be used locally.
:param sql: A SQL query.
:param hadoop_local: The local path of Hadoop.
:return: The altered SQL code which can be used locally.
"""
sql = re.sub(r"viewfs://[^/]+/", "/", sql)
prefixes = ["/sys/", "/apps", "/user"]
for prefix in prefixes:
sql = sql.replace(prefix, f"{hadoop_local}{prefix}")
return sql | 83389ac58f0e976119274654f2078ed2e5c3ae47 | 693,066 |
from datetime import datetime
def from_timestamp(timestamp: str) -> datetime:
    """Parse the raw API timestamp (``YYYYMMDDTHHMMSS.000Z``) into a
    naive :class:`datetime.datetime` object."""
    api_format = "%Y%m%dT%H%M%S.000Z"
    return datetime.strptime(timestamp, api_format)
def format_action(a):
    """
    Render an Action object as its pompdp-file string form.

    e.g. scan of machine (0, 0)              -> "a00scan"
    e.g. exploit of service 1 on machine (1, 0) -> "a10exp1"
    """
    prefix = "a{0}{1}".format(a.target[0], a.target[1])
    if a.is_scan():
        return prefix + "scan"
    return "{0}exp{1}".format(prefix, a.service)
def pos_next_to(pos_a, pos_b):
    """
    Return True when the two positions are orthogonally adjacent.

    Diagonal neighbours do not count: the Manhattan distance between
    the positions must be exactly one.
    """
    manhattan = abs(pos_a[0] - pos_b[0]) + abs(pos_a[1] - pos_b[1])
    return manhattan == 1
import random
import string
def gen_dummy_object(class_, doc):
    """Create a dummy object based on the definitions in the API Doc.

    Properties referencing other vocab classes are generated recursively;
    every other property receives a random 6-character uppercase/digit
    string.  Unknown classes yield only the ``@type`` entry.
    """
    dummy = {"@type": class_}
    if class_ not in doc.parsed_classes:
        return dummy
    for prop in doc.parsed_classes[class_]["class"].supportedProperty:
        if "vocab:" in prop.prop:
            nested_class = prop.prop.replace("vocab:", "")
            dummy[prop.title] = gen_dummy_object(nested_class, doc)
        else:
            alphabet = string.ascii_uppercase + string.digits
            dummy[prop.title] = ''.join(random.choice(alphabet) for _ in range(6))
    return dummy
import torch
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
    """Create a bias-free 3x3 Conv2d layer.

    Note: the default ``padding`` is 0; pass ``padding=dilation`` for a
    shape-preserving convolution.
    """
    return torch.nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def remove_moves_occupied(state, moves):
    """
    Remove from *moves* the board positions that are already occupied.

    A cell is free when it holds 'O'; any other value marks an occupied
    position whose index is dropped from *moves* in place.

    :param state: object exposing the board via ``state.board``
    :param moves: list of candidate move indices, mutated in place
    :return: the same *moves* list
    """
    for position, cell in enumerate(state.board):
        if cell != 'O' and position in moves:
            moves.remove(position)
    return moves
def scale01(arr):
    """
    Scale the array by its maximum so the largest value becomes 1.

    Note: values only land in [0, 1] when the input is non-negative;
    negative entries remain negative after the division.

    :param arr: input ndarray
    :return: scaled ndarray
    """
    peak = arr.max()
    return arr / peak
def to_plotly_rgb(r, g, b):
    """Convert a seaborn-style colour tuple to a plotly RGB string.

    Args:
        r (float): between 0 and 1
        g (float): between 0 and 1
        b (float): between 0 and 1

    Returns: an ``"rgb(R, G, B)"`` string for plotly.
    """
    scaled = [f"{channel * 255:.0f}" for channel in (r, g, b)]
    return "rgb({}, {}, {})".format(*scaled)
import pickle
def load_annotation_file(filename):
    """
    Load a pickled annotation file and return its contents.

    The file handle is now closed deterministically via a context
    manager; the original left the open file to the garbage collector.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
import torch
def linspace(start: torch.Tensor, stop: torch.Tensor,
             num_samples: int) -> torch.Tensor:
    """Generalization of linspace to arbitrary tensors.

    Args:
        start (torch.Tensor): Minimum 1D tensor. Same length as stop.
        stop (torch.Tensor): Maximum 1D tensor. Same length as start.
        num_samples (int): Number of samples to take from the linear range.

    Returns:
        torch.Tensor: (D, num_samples) tensor of linearly interpolated
            samples.
    """
    fractions = torch.linspace(0, 1, num_samples).unsqueeze(0)
    span = (stop - start).unsqueeze(-1)
    return start.unsqueeze(-1) + fractions * span
def contains_all_value(page):
    """
    Check that the page defines both a <title> and a <text> element.

    Returns
    --------
    bool
        True when both elements are present, False otherwise.
    """
    has_title = bool(page.getElementsByTagName('title'))
    has_text = bool(page.getElementsByTagName('text'))
    return has_title and has_text
def getPersonURL(person):
    """
    Return the address at which the view for this Person is available.
    """
    organizer = person.organizer
    return organizer.linkToPerson(person)
def get_syslog_facility(facility):
    """get_syslog_facility() -- Get human-readable syslog facility name.

    Args (required):
        facility (int) - Facility number (0-23).

    Returns:
        Name of facility upon success.
        "UNKNOWN" on failure (out-of-range or non-integer input).
    """
    facilities = (
        "KERNEL", "USER", "MAIL", "DAEMON", "AUTH", "SYSLOG", "LPR",
        "NEWS", "UUCP", "TIME", "AUTH", "FTP", "NTP", "AUDIT", "ALERT",
        "CLOCK", "LOCAL0", "LOCAL1", "LOCAL2", "LOCAL3", "LOCAL4",
        "LOCAL5", "LOCAL6", "LOCAL7",
    )
    try:
        # Explicit range check: bare tuple indexing silently accepted
        # negative numbers (facilities[-1] was "LOCAL7").
        if 0 <= facility < len(facilities):
            return facilities[facility]
        return "UNKNOWN"
    except TypeError:
        return "UNKNOWN"
def find_by_key(target: str, data: dict) -> str:
    """
    Return the value of the target key from a nested Python dictionary.

    Top-level keys take precedence over nested ones, and *all* nested
    dictionaries are searched.  (The original returned the recursion
    result of the first dict-valued entry it encountered, which missed
    keys stored after it or in sibling sub-dicts.)  An empty string is
    returned when the key is absent.
    """
    if target in data:
        return data[target]
    for value in data.values():
        if isinstance(value, dict):
            found = find_by_key(target, value)
            # "" doubles as the not-found sentinel, as in the original.
            if found != "":
                return found
    return ""
from typing import List
from typing import Any
def reverse_list(L: List[Any], N: int) -> List[Any]:
    """Cut the list after the N-th element (only when N is smaller than
    the list length) and return it in reverse order.

    Parameters
    ----------
    L : list
        List to be reversed.
    N : int
        Cut index, applied only when ``N < len(L)``.

    Returns
    -------
    list
        The (possibly truncated) list, reversed.
    """
    truncated = L[:N] if N < len(L) else L
    return truncated[::-1]
def add(path: str, content: str, encoding: str = "utf-8") -> int:
    """
    Append *content* to the file at *path*, creating the file when it
    does not exist.  Returns the number of characters written.
    """
    with open(path, "a", encoding=encoding) as handle:
        return handle.write(content)
def convert_list_for_sql(my_list):
    """Render a python list as a comma-separated SQL value list.

    Strings are wrapped in single quotes; every other element is
    stringified as-is.

    WARNING: values are NOT escaped -- a quote inside a string element
    would break or inject into the query, so only use this with trusted
    input.

    Example:
        1. convert_list_for_sql([1, 2, 3]) returns '1, 2, 3'
        2. convert_list_for_sql(['Simone', 'Dan']) returns "'Simone', 'Dan'"
    """
    rendered = [
        "'{}'".format(item) if isinstance(item, str) else str(item)
        for item in my_list
    ]
    return ", ".join(rendered)
def get_attrs(obj, config_attrs):
    """
    Collect the listed attributes of *obj* into a dictionary.

    NOTE(review): contrary to the original docstring, no None-filtering
    is performed -- attributes whose value is None are included too.

    :param obj: object containing the attributes.
    :param config_attrs: names of the configurable attributes to read.
    :return: dict mapping each attribute name to its current value.
    """
    return {name: getattr(obj, name) for name in config_attrs}
def asm_label(address):
    """
    Return the local label name for asm at <address> (hex-formatted).
    """
    return '.asm_{:x}'.format(address)
from pathlib import Path
def is_python(path: Path) -> bool:
    """Return True when *path*'s final suffix is ``.py``."""
    return path.suffix == '.py'
def get_integrated_intensity(ave_azav, peak_bin, delta_bin=3):
    """
    Get the average integrated intensity: sum the bins from
    ``peak_bin - delta_bin`` through ``peak_bin + delta_bin`` inclusive.

    Parameters
    ----------
    ave_azav: ndarray
        radially binned average azimuthal intensity from used curves
    peak_bin: int
        peak radial bin
    delta_bin: int
        number of bins on each side of peak to include in sum

    Returns
    -------
    integrated_intensity: float
        the calculated integrated intensity
    """
    # Clamp the lower edge: a negative index would silently wrap to the
    # end of the array.  The upper bound is +1 so the slice really spans
    # delta_bin bins on *both* sides (the original dropped the top bin).
    low = max(0, peak_bin - delta_bin)
    high = peak_bin + delta_bin + 1
    return ave_azav[low:high].sum(axis=0)
import re
def parse_params(path):
    """Parse a path fragment into (key, value) pairs.

    Slashes separate alternating keys and values; leading/trailing and
    duplicated slashes are ignored, so '/a/3/b/5' -> [('a', '3'), ('b', '5')].
    A trailing key with no value is dropped.

    Returns a list (the original returned a one-shot ``zip`` iterator
    and mis-paired keys/values when the path had a leading slash, since
    the empty leading component was counted as the first key).
    """
    parts = [part for part in path.split('/') if part]
    return list(zip(parts[::2], parts[1::2]))
def circ_supply(height: int, nano: bool = False) -> int:
    """
    Circulating supply at given height, in ERG (or nanoERG).

    Models an emission schedule with a fixed-rate period (75 ERG/block)
    followed by 64800-block epochs in which the per-block rate drops by
    3 ERG until it reaches zero.

    :param height: block height at which to evaluate the supply
    :param nano: if True, return the supply in nanoERG (x 10**9)
    :return: circulating supply as an integer
    """
    # Emission settings
    initial_rate = 75  # ERG per block during the fixed-rate period
    fixed_rate_blocks = 525600 - 1  # blocks paid at the fixed rate
    epoch_length = 64800  # blocks per reduction epoch
    step = 3  # ERG/block reduction applied per epoch
    # At current height
    completed_epochs = max(0, height - fixed_rate_blocks) // epoch_length
    # NOTE(review): adds 1 only once at least one epoch has completed, so
    # the rate index jumps from 0 to 2 after the first epoch -- verify
    # against the intended emission spec before changing.
    current_epoch = completed_epochs + min(1, completed_epochs)
    blocks_in_current_epoch = max(0, height - fixed_rate_blocks) % epoch_length
    current_rate = max(0, initial_rate - current_epoch * step)
    # Components
    fixed_period_cs = min(fixed_rate_blocks, height) * initial_rate
    # Sum over full epochs already completed, each at its reduced rate
    # (clamped at zero once the schedule is exhausted).
    completed_epochs_cs = sum(
        [
            epoch_length * max(0, initial_rate - step * (i + 1))
            for i in range(completed_epochs)
        ]
    )
    current_epoch_cs = blocks_in_current_epoch * current_rate
    # Circulating supply
    cs = fixed_period_cs + completed_epochs_cs + current_epoch_cs
    if nano:
        cs *= 10**9
    return cs
import torch
def pad_tensor(tensor, seq_len):
    """Pad *tensor* along dim 0 by repeating its last element until the
    total length is a multiple of *seq_len*.

    Note: the pad size is ``seq_len - len % seq_len``, which is never
    zero -- a tensor whose length is already a multiple of *seq_len*
    gains a full extra block of repeated elements.
    """
    pad_shape = list(tensor.size())
    pad_shape[0] = seq_len - tensor.size()[0] % seq_len
    pad = tensor[-1].clone().expand(pad_shape).to(tensor.device)
    return torch.cat((tensor, pad))
def badtoken(t):
    """Return True when *t* is a punctuation token, a space, or a newline."""
    if t.is_punct:
        return True
    return t.text in (' ', '\n')
def _parse_search_results(json_result):
"""Search results are divided into 'statuses' and 'search_metadata'. The former
contains the tweets themselves, and the latter contains the max_id to use to retrieve
the next batch of tweets"""
statuses = json_result.get('statuses')
metadata = json_result.get('search_metadata')
next_results = metadata.get('next_results')
return statuses, next_results | 63fb0a04297e65f0e2cdb723b0c03c8d46abfdaa | 693,141 |
def contains_unusual_content(result: dict) -> bool:
    """
    Return True when the verapdf response flags unusual PDF content
    (Launch, Sound, Movie, ResetForm, ImportData and JavaScript actions),
    i.e. when ISO 19005.1 clause 6.6.1 appears among the failed assertions.

    :param result: parsed JSON response from POSTing a PDF to verapdf
    :return: True if the PDF contains unusual content, otherwise False
    """
    for assertion in result["testAssertions"]:
        rule = assertion["ruleId"]
        if (
            assertion["status"] == "FAILED"
            and rule["specification"] == "ISO_19005_1"
            and rule["clause"] == "6.6.1"
        ):
            return True
    return False
def get_channels(
    public, stable, server, intranet, group, add_dependent_channels=False
):
    """Returns the relevant conda channels to consider if building project.

    The subset of channels to be returned depends on the visibility and
    stability of the package being built. Here are the rules:

    * public and stable: returns the public stable channel
    * public and not stable: returns the public beta channel
    * not public and stable: returns both public and private stable channels
    * not public and not stable: returns both public and private beta channels

    Public channels have priority over private channels, if turned.

    Args:
        public: Boolean indicating if we're supposed to include only public
            channels
        stable: Boolean indicating if we're supposed to include stable channels
        server: The base address of the server containing our conda channels
        intranet: Boolean indicating if we should add "private"/"public" prefixes
            on the conda paths
        group: The group of packages (gitlab namespace) the package we're
            compiling is part of. Values should match URL namespaces currently
            available on our internal webserver. Currently, only "bob" or "beat"
            will work.
        add_dependent_channels: If True, will add the conda-forge channel to the list

    Returns: a tuple ``(channels, upload_channel)``: the list of channels to
        consider and the single channel uploads should target.
    """
    # Private channels are only reachable on the intranet by definition.
    if (not public) and (not intranet):
        raise RuntimeError(
            "You cannot request for private channels and set"
            " intranet=False (server=%s) - these are conflicting options"
            % server
        )
    channels = []
    # Remembers which URL maps to which visibility/stability combination
    # so upload_channel can be resolved by key below.
    channels_dict = {}
    # do not use '/public' urls for public channels
    prefix = "/software/" + group
    if stable:
        channels += [server + prefix + "/conda"]
        channels_dict["public/stable"] = channels[-1]
    else:
        channels += [server + prefix + "/conda/label/beta"]  # allowed betas
        channels_dict["public/beta"] = channels[-1]
    if not public:
        prefix = "/private"
        if stable:  # allowed private channels
            channels += [server + prefix + "/conda"]
            channels_dict["private/stable"] = channels[-1]
        else:
            channels += [server + prefix + "/conda/label/beta"]  # allowed betas
            channels_dict["private/beta"] = channels[-1]
    upload_channel = channels_dict[
        "{}/{}".format(
            "public" if public else "private", "stable" if stable else "beta"
        )
    ]
    if add_dependent_channels:
        channels += ["conda-forge"]
    return channels, upload_channel
def validate_text(text):
    """Return True when *text* is non-empty and not purely whitespace."""
    if not text:
        return False
    return not text.isspace()
import re
def normalize_whitespace(text):
    r"""
    Replace non-breaking spaces with regular spaces, collapse runs of
    whitespace into single spaces, and strip whitespace from the ends.

    Fix: the original chained ``str.replace(r"\S+", " ")`` --
    ``str.replace`` is literal, so that call only mangled input that
    happened to contain the characters ``\S+``.  It has been removed;
    the regex collapse below already handles whitespace runs.
    """
    spaced = text.replace(u"\xa0", u" ")
    return re.sub(r"\s+", " ", spaced).strip()
import pathlib
def name_from_path(path: str) -> str:
    """Generate a model name from the H5 path: the file name without its
    final suffix.

    Uses ``Path.stem``; the original sliced with ``[:-len(suffix)]``,
    which returned an empty string when the path had no suffix
    (``"name"[:-0]`` evaluates to ``""``).
    """
    return pathlib.Path(path).stem
def check_reference_allele(reference_base, bpm_record_group):
    """
    Check whether the given reference base (on the plus strand) is queried
    by any record in a group of BPMRecords.

    Args:
        reference_base (string): The reference base
        bpm_record_group (iter(BPMRecord)): Iterable of BPMRecords

    Returns:
        bool: True if the record group contains the reference allele
    """
    # Generator (not a materialized list) lets any() short-circuit on the
    # first match; the redundant if/return wrapper is gone too.
    return any(
        reference_base in record.plus_strand_alleles
        for record in bpm_record_group
    )
def sieve_of_eratosthene(num):
    """
    Compute the primes below *num* using the sieve of Eratosthenes.

    :param num: exclusive upper bound for the prime search.
    :returns: list of prime numbers strictly below *num*; empty for
        num < 2 (the original raised IndexError in that case).
    """
    if num < 2:
        return []
    sieve = list(range(num))
    sieve[1] = 0  # 0 marks non-primes; index 0 is already 0
    for candidate in range(2, num):
        if sieve[candidate]:
            # Start at candidate**2: smaller multiples were already
            # cleared while sieving smaller primes.
            for multiple in range(candidate * candidate, num, candidate):
                sieve[multiple] = 0
    return [n for n in sieve if n != 0]
def greeting(name: str) -> str:
    """
    Construct a greeting.

    :param name: name of the person or object to greet; a falsy name
        produces the generic greeting
    :return: greeting
    """
    if name:
        return f"Hello, {name}!"
    return "Hello!"
def is_literal_value_packet(type: int) -> bool:
    """Return True when the packet *type* denotes a literal value (type 4)."""
    LITERAL_VALUE_TYPE = 4
    return type == LITERAL_VALUE_TYPE
def changes_between_snapshots(before_dict, after_dict):
    """Diff two artifact snapshots (dicts of artifact name -> hash).

    Given the 'before' and 'after' snapshots, return a tuple of four
    sets of artifact names: (unchanged, modified, added, removed).
    """
    before_names = set(before_dict)
    after_names = set(after_dict)
    common = before_names & after_names
    unchanged = {name for name in common if before_dict[name] == after_dict[name]}
    modified = common - unchanged
    added = after_names - before_names
    removed = before_names - after_names
    return (unchanged, modified, added, removed)
import torch
def inference_collate_batch(batch):
    """Collate (feature_path, mel) pairs into (paths_tuple, stacked_mels)."""
    feat_paths, mels = zip(*batch)
    stacked_mels = torch.stack(mels)
    return feat_paths, stacked_mels
def createOrderList(wantedOrder, currentOrder):
    """ Create an order list that can transform currentOrder to
    wantedOrder by applying applyOrderList function.

    An order list gives, for each wanted element, its position in the
    current order, e.g. createOrderList([1,2,3], [3,1,2,4,6]) == [1, 2, 0].

    Returns a real list: the original returned a one-shot ``map``
    iterator under Python 3, contradicting its own documentation.
    """
    return [currentOrder.index(element) for element in wantedOrder]
def parse_scoped_project_queue(scoped_name):
    """Split a scoped catalogue entry into its project and queue parts.

    :param scoped_name: a 'project/queue' name as given by :scope_queue_name:
    :type scoped_name: str
    :returns: [project, queue]
    :rtype: list
    """
    separator = '/'
    return scoped_name.split(separator)
def scan_row(row):
    """Return (first, last) indices of shaded (truthy) cells in *row*.

    Fix: the original used 0 as the "unset" sentinel for *start*, so a
    shaded column 0 was never recorded and the start slipped to the next
    shaded column.  A None sentinel records column 0 correctly.  An
    all-empty row still yields (0, 0), as before.
    """
    start = None
    end = 0
    for column, value in enumerate(row):
        if value:
            if start is None:
                start = column
            end = column
    return (start if start is not None else 0, end)
def Flickr30k_sentence_data(fn):
    """
    Parses a sentence file from the Flickr30K Entities dataset.

    Each line is a sentence whose annotated phrases are wrapped in an
    inline markup of the form ``[/XX#<id>/<type1>/<type2> word word]``
    (the id carries a 3-character prefix that is stripped below).

    :param fn: full file path to the sentence file to parse.
    :return: a list of dictionaries for each sentence with the following fields:
              sentence - the original sentence
              phrases - a list of dictionaries for each phrase with the following fields:
                          phrase - the text of the annotated phrase
                          first_word_index - the position of the first word of the phrase in the sentence
                          phrase_id - an identifier for this phrase
                          phrase_type - a list of the coarse categories this phrase belongs to
    """
    with open(fn, 'r') as f:
        sentences = f.read().split('\n')
    annotations = []
    for sentence in sentences:
        if not sentence:
            continue
        # Per-sentence accumulators; these lists stay index-aligned so
        # they can be zipped together at the end.
        first_word = []      # sentence word-index where each phrase starts
        phrases = []         # phrase texts
        phrase_id = []       # phrase identifiers
        phrase_type = []     # coarse category lists
        words = []           # plain sentence words with markup stripped
        current_phrase = []
        add_to_phrase = False
        for token in sentence.split():
            if add_to_phrase:
                # A trailing ']' closes the current phrase span.
                if token[-1] == ']':
                    add_to_phrase = False
                    token = token[:-1]
                    current_phrase.append(token)
                    phrases.append(' '.join(current_phrase))
                    current_phrase = []
                else:
                    current_phrase.append(token)
                words.append(token)
            else:
                if token[0] == '[':
                    # '[' opens a phrase; its header encodes id and types.
                    add_to_phrase = True
                    first_word.append(len(words))
                    parts = token.split('/')
                    phrase_id.append(parts[1][3:])  # drop the 3-char prefix (e.g. 'EN#')
                    phrase_type.append(parts[2:])
                else:
                    words.append(token)
        sentence_data = {'sentence': ' '.join(words), 'phrases' : []}
        for index, phrase, p_id, p_type in zip(first_word, phrases, phrase_id, phrase_type):
            sentence_data['phrases'].append({'first_word_index': index,
                                             'phrase': phrase,
                                             'phrase_id': p_id,
                                             'phrase_type': p_type})
        annotations.append(sentence_data)
    return annotations
def is_avcs_table(table):
    """Return True when the table's short title starts with
    'attribute value change' (case-insensitive)."""
    marker = 'attribute value change'
    return table.short_title[:len(marker)].lower() == marker
def valid_conversion(val, type_to_convert):
    """
    Check whether *val* can be converted to the specified type.

    :param val: value
    :param type_to_convert: a type object; anything else raises TypeError
    :return: True when the conversion succeeds without a ValueError
    """
    if not isinstance(type_to_convert, type):
        raise TypeError
    try:
        type_to_convert(val)
    except ValueError:
        return False
    return True
import math
def variation_distance(data1, data2):
    """Return the total variation distance between the distributions
    represented by *data1* and *data2* (dicts of outcome -> weight).

    Missing keys in either dict are treated as weight 0.
    """
    all_keys = set(data1) | set(data2)
    total = sum(
        math.fabs(data1.get(key, 0) - data2.get(key, 0))
        for key in all_keys
    )
    return .5 * total
def zip_author(author):
    """
    Expand an author entry of the form
    ``[first_name, last_name, [key1, key2]]`` into one row per
    affiliation key:
    ``[[first_name, last_name, key1], [first_name, last_name, key2]]``.
    """
    first, last = author[0], author[1]
    return [[first, last, key] for key in author[-1]]
import shutil
def teardown_tempdir(path_to_dir):
    """Recursively delete *path_to_dir*, even when it is not empty."""
    shutil.rmtree(path_to_dir)
    return None
def loadHostDefaults(config):
    """
    Build default entries from the "host" key of the configuration.

    Returns one ``{"key": "host", "src": url}`` dict per comma-separated
    URL, or an empty list when the host/url setting is missing or falsy.
    """
    host_section = (config or {}).get("host") or {}
    url_setting = host_section.get("url")
    if not url_setting:
        return []
    return [{"key": "host", "src": url} for url in url_setting.split(",")]
def event_count(events):
    """Return the total number of events across multiple event lists."""
    return sum(len(sublist) for sublist in events)
def format_memory_size(size):
    """
    Format a byte count as a human-readable string.

    Note: falsy sizes (including 0) yield 'N/A GB', matching the
    original behavior.  TB/GB are shown with one decimal place; MB/KB/B
    are truncated to whole units.

    :param size: Size in bytes
    :type size: Number
    :returns: Formatted size string.
    :rtype: String
    """
    if not size:
        return 'N/A GB'
    if size >= 1099511627776:
        return '%.1f TB' % (float(size) / 1099511627776.0)
    if size >= 1073741824:
        return '%.1f GB' % (float(size) / 1073741824.0)
    if size >= 1048576:
        return '%d MB' % (int(size) / 1048576)
    if size >= 1024:
        return '%d KB' % (int(size) / 1024)
    return '%d B' % int(size)
def turn_into_list(object):
    """Return *object* unchanged when it already is a list, otherwise a
    single-element list containing it (never a nested list)."""
    return object if isinstance(object, list) else [object]
async def is_document_exists(collection, id):
    """Determine if a document with a specific id exists in a collection.

    Uses ``count_documents`` with ``limit=1`` so the server can stop at
    the first match; the awaited count (0 or 1) is returned and is
    treated as a boolean by callers.
    """
    return await collection.count_documents({'_id': id}, limit=1)
from typing import List
def _solve_tridiagonal_matrix(a: List[float], b: List[float], c: List[float], r: List[float]) -> List[float]:
    """ Solves the linear equation system given by a tri-diagonal Matrix(a, b, c) . x = r.

    Implementation: the Thomas algorithm (simplified Gaussian elimination
    for tridiagonal systems) -- one forward-elimination sweep followed by
    back substitution; O(n) time and memory.

    Matrix configuration::

        [[b0, c0, 0, 0, ...],
        [a1, b1, c1, 0, ...],
        [0, a2, b2, c2, ...],
        ... ]

    Args:
        a: lower diagonal [a0 .. an-1], a0 is not used but has to be present
        b: central diagonal [b0 .. bn-1]
        c: upper diagonal [c0 .. cn-1], cn-1 is not used and must not be present
        r: right-hand side quantities

    Returns:
        vector x as list of floats

    Raises:
        ZeroDivisionError: singular matrix
    """
    n = len(a)
    u = [0.0] * n    # solution vector, filled during both sweeps
    gam = [0.0] * n  # elimination factors kept for back substitution
    bet = b[0]       # running pivot; division by it raises on singularity
    u[0] = r[0] / bet
    # Forward sweep: eliminate the sub-diagonal, updating the pivot.
    for j in range(1, n):
        gam[j] = c[j - 1] / bet
        bet = b[j] - a[j] * gam[j]
        u[j] = (r[j] - a[j] * u[j - 1]) / bet
    # Back substitution over the eliminated upper-triangular system.
    for j in range((n - 2), -1, -1):
        u[j] -= gam[j + 1] * u[j + 1]
    return u
from typing import List
def permutar(arr: List[int]) -> List[List[int]]:
    """Return every permutation of *arr* (a list of unique integers).

    Note: an empty input yields ``[]`` rather than ``[[]]``.

    :param arr: list of unique integers.
    :arr type: List[int]
    :return: list of permutations.
    :rtype: List[List[int]]
    """
    if not arr:
        return []
    if len(arr) == 1:
        return [arr]
    result = []
    for index in range(len(arr)):
        remainder = arr[:index] + arr[index + 1:]
        result.extend([arr[index]] + tail for tail in permutar(remainder))
    return result
def polygon2pathd(polyline_d):
    """Convert an SVG polygon points-attribute string into a Path
    d-attribute string.

    Note: a polygon made from n points yields a path of n lines (some
    possibly of zero length).  A redundantly closed point list (first
    point repeated at the end) keeps an explicit closing line, since
    `parse_path` ignores redundant 'z' (closure) commands, e.g.
    `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')`.
    """
    normalized = polyline_d.replace(', ', ',').replace(' ,', ',')
    points = normalized.split()
    closed_redundantly = points[0] == points[-1]
    segments = ['M' + points[0].replace(',', ' ')]
    segments.extend('L' + point.replace(',', ' ') for point in points[1:])
    if closed_redundantly:
        segments.append('L' + points[0].replace(',', ' '))
    return ''.join(segments) + 'z'
def build_compound(compound):
    """Build a compound object from a compound dictionary.

    Args:
        compound(dict): must hold the keys 'variant' (document id of the
            compound variant), 'display_name' (the variant id) and
            'score' (value coercible to float).

    Returns:
        compound_obj(dict): with keys
            variant (str), display_name (str), combined_score (float)
    """
    combined = float(compound["score"])
    compound_obj = {
        "variant": compound["variant"],
        "display_name": compound["display_name"],
        "combined_score": combined,
    }
    return compound_obj
import requests
import time
def get_results_with_retry(wdt_sparql_url, query):
    """
    Run a SPARQL query against the endpoint repeatedly until a JSON
    response is successfully obtained, sleeping 2 s between attempts.
    """
    params = {'format': 'json', 'query': query}
    while True:
        try:
            # Any failure (network error, malformed JSON, ...) triggers
            # another attempt; retries are unbounded by design.
            return requests.get(wdt_sparql_url, params=params).json()
        except Exception as err:
            print(err, 'error, retrying')
            time.sleep(2)
def print_palindromes(palindrome_dict):
    """
    Print one line per palindrome: the position (the dict key) and the
    length (the first element of the dict value), separated by a space.
    """
    for position, info in palindrome_dict.items():
        print(position, info[0])
    return None
def formatTuples(tuples):
"""
Renders a list of 2-tuples into a column-aligned string format.
tuples (list of (any, any): The list of tuples to render.
Returns: A new string ready for printing.
"""
if not tuples:
return ""
tuples = [(str(x), str(y)) for (x, y) in tuples]
width = max([len(x) for (x, y) in tuples])
fmt = " {{:{}}} {{}}\n".format(width + 2)
result = ""
for (x, y) in tuples:
result += fmt.format(x, y)
return result | f078aa0b512c0ce63a8d4a06ea0a2d3df31bde7b | 693,228 |
def normalized_thread_name(thread_name):
    """
    Abbreviate a long thread name (for Zafira UI) to its uppercase
    letters only, e.g. MainThread -> MT, ThreadPoolExecutor -> TPE.
    :param thread_name: thread name from Log Record object
    :return: simplified thread name
    """
    return ''.join(ch for ch in thread_name if ch.isupper())
def _subexon_ranks(strand, transcript_len):
"""
Return a list of the subexon ranks.
NOTE: Rank starts in 0 to make this compatible with
end_phase_previous_exon that expect exon_pos(ition) to start in 0.
>>> _subexon_ranks(-1, 10)
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
>>> _subexon_ranks(1, 10)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
if strand == -1:
return list(range(transcript_len - 1, -1, -1))
return list(range(0, transcript_len)) | efcf0e376d0666c7ca018e220fab598abf80eec8 | 693,231 |
def check_not_mnist_files(path):
    """Filters some bad files in NotMNIST dataset."""
    # Known-corrupt files (base64-encoded font names) to exclude.
    bad_paths = (
        "RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png",
        "Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png",
    )
    return not any(bad in path for bad in bad_paths)
from typing import Dict
from typing import List
from typing import Tuple
from typing import Optional
def deconstruct_entry(entry: Dict[str, str],
                      username_fields: List[str],
                      sightings_fields: List[str]) -> Tuple[Optional[str],
                                                            Optional[str],
                                                            Optional[int]]:
    """
    deconstruct_entry
    Extract user-related fields from a single log entry.

    :type entry: ``Dict[str, str]``
    :param entry: Log entry as dictionary of fields.

    :type username_fields: ``List[str]``
    :param username_fields: Candidate field names holding the username.

    :type sightings_fields: ``List[str]``
    :param sightings_fields: Candidate field names holding the number of
        occurrences of the event (defaults to 1 when none is present).

    :return: Tuple of (username or None, domain split off a
        ``DOMAIN\\user`` style username or None, number of occurrences).
    :rtype: ``Tuple[Optional[str], Optional[str], Optional[int]]``
    """
    username = None
    for field in username_fields:
        if field in entry:
            username = entry[field]
            break
    sightings = 1
    for field in sightings_fields:
        if field in entry:
            sightings = int(entry[field])
            break
    domain = None
    if username is not None and "\\" in username:
        domain, username = username.split("\\", 1)
    return username, domain, sightings
def _vrtWrapBand(vrtDataset, sourceBand):
    """
    Wraps a GDAL raster band in a VRT band.

    Adds a new band to ``vrtDataset`` with the same data type as
    ``sourceBand`` and registers ``sourceBand`` as its single simple
    source, mapping the full source extent 1:1 onto the new band.

    Parameters (presumably GDAL objects — confirm against callers):
        vrtDataset: open, writable VRT dataset that receives the band.
        sourceBand: raster band to wrap; its owning dataset must be
            file-backed, since ``GetFileList()[0]`` is used below.

    Returns:
        The newly created VRT raster band.
    """
    # Retrieve the width and height from the source band
    width = sourceBand.XSize
    height = sourceBand.YSize
    # Create the new VRT raster band with the same data type as the source
    vrtDataset.AddBand(sourceBand.DataType)
    # The freshly added band is the last one; GDAL band indices are 1-based
    vrtBand = vrtDataset.GetRasterBand(vrtDataset.RasterCount)
    # Build the XML for the data source; identical SrcRect/DstRect means
    # no cropping or scaling is applied
    bandSource = '''<SimpleSource>
        <SourceFilename relativeToVRT="1">{}</SourceFilename>
        <SourceBand>{}</SourceBand>
        <SrcRect xOff="{}" yOff="{}" xSize="{}" ySize="{}"/>
        <DstRect xOff="{}" yOff="{}" xSize="{}" ySize="{}"/>
    </SimpleSource>'''.format(
        sourceBand.GetDataset().GetFileList()[0],
        sourceBand.GetBand(),
        0, 0, width, height,
        0, 0, width, height
    )
    # Add the data source to the VRT band via the special 'vrt_sources'
    # metadata domain understood by the VRT driver
    metadata = {}
    metadata['source_0'] = bandSource
    vrtBand.SetMetadata(metadata, 'vrt_sources')
    return vrtBand
def utcstr(ts):
    """
    Format a UTC timestamp in ISO 8601 format with millisecond precision.

    :param ts: The timestamp to format (may be falsy, e.g. ``None``).
    :type ts: instance of :py:class:`datetime.datetime`

    :returns: Timestamp formatted as ``YYYY-MM-DDTHH:MM:SS.mmmZ``, or the
        input unchanged when it is falsy.
    :rtype: unicode
    """
    if not ts:
        return ts
    # %f yields microseconds; drop the last three digits for milliseconds.
    iso = ts.strftime("%Y-%m-%dT%H:%M:%S.%f")
    return iso[:-3] + "Z"
def get_strikes(df, strike, dte):
    """Return the available strike price closest to ``strike``.

    Args:
        df: Option-chain DataFrame with 'daysToExpiration' and
            'strikePrice' columns.
        strike: Target price (e.g. the underlying price) to match.
        dte: Maximum days to expiration to consider.

    Returns:
        The strike price from ``df`` closest to ``strike`` among rows
        with daysToExpiration <= dte.
    """
    # Keep only contracts expiring within the requested window.
    # (Previous comment said "sort by DTE", but this is a filter.)
    eligible = df[df['daysToExpiration'] <= dte]
    # Row whose strike is nearest the target price.
    closest = eligible.iloc[(eligible['strikePrice'] - strike).abs().argsort()[:1]]
    # .iloc[0] avoids chained label-based indexing on a possibly
    # non-zero-based index (no reset_index needed).
    strike_price = closest['strikePrice'].iloc[0]
    return strike_price
def getkey(pstring):
    """Strips units and argument lists from a property/trajectory keyword.

    Args:
        pstring: The string input by the user that specifies an output,
            which in general will specify units and argument lists.

    Returns: A string giving the keyword for the property, stripped of
        the argument lists and units key words.
    """
    # Cut at whichever of '(' (argument list) or '{' (units) comes first.
    end = len(pstring)
    for delimiter in ('(', '{'):
        position = pstring.find(delimiter)
        if position >= 0:
            end = min(end, position)
    return pstring[:end].strip()
from pathlib import Path
import hashlib
def get_hash(path: Path) -> str:
    """Get the SHA256 hash digest of a file."""
    digest = hashlib.sha256(path.read_bytes())
    return digest.hexdigest()
import re
def is_line_function_definition(line: str) -> bool:
    """Return True if the given line (of a python file) starts a function
    definition. Functions whose names begin with `__` (private) are
    excluded.

    Args:
        line: a line in a python file
    """
    starts_with_def = re.match(' *def ', line) is not None
    return starts_with_def and 'def __' not in line
def salt_cloud_cli(salt_master_factory):
    """
    Provide the ``salt-cloud`` CLI built from the running master factory.
    """
    cli = salt_master_factory.salt_cloud_cli()
    return cli
def get_answers_for_question(current_user, question):
    """
    Collect the serialized answers attached to a question.

    :param current_user: user node; answers connected to it are flagged
    :param question: neomodel node exposing an ``answers`` relationship
    :return answers[]: list of answer dicts, each gaining
        ``answered: True`` when ``current_user`` is connected to it
    """
    collected = []
    for answer_node in question.answers:
        serialized = answer_node.json()
        if answer_node.users.is_connected(current_user):
            serialized['answered'] = True
        collected.append(serialized)
    return collected
def is_numpy(value):
    """
    Determines whether the specified value is a NumPy value, i.e. an
    numpy.ndarray or a NumPy scalar, etc.

    Parameters:
    -----------
    value:
        The value for which is to be determined if it is a NumPy value or not.

    Returns:
    --------
    boolean: True if the value's type lives in the numpy package,
        False otherwise.
    """
    module_name = type(value).__module__
    return module_name.partition(".")[0] == "numpy"
from datetime import datetime
def parse_time(argument):
    """Time parser to be used as type argument for argparser options."""
    time_format = "%H:%M"
    return datetime.strptime(argument, time_format)
def open_tdump(filename):
    """Opens the tdump file

    Parameters
    ----------------
    filename: string
        Full file path for tdump file

    Returns
    -----------
    tdump: _io.TextIOWrapper
        tdump file opened in read mode, encoding UTF-8
        ready to be used by other functions in this code
    """
    # Explicit encoding so the documented UTF-8 contract holds regardless
    # of the platform's locale default (the previous bare open() did not).
    tdump = open(filename, mode="r", encoding="utf-8")
    return tdump
def methods_equivalent(meth1, meth2):
    """Return True if the two methods are the same implementation."""
    # Bound methods wrap the underlying function in __func__; plain
    # functions compare as themselves.
    underlying_first = getattr(meth1, '__func__', meth1)
    underlying_second = getattr(meth2, '__func__', meth2)
    return underlying_first is underlying_second
from functools import reduce
def union(sets):
    """
    Returns the union of multiple sets.

    Args:
        sets: iterable of sets; may be empty.

    Returns:
        set: the union of all the given sets (an empty set for an
        empty iterable).
    """
    # The explicit ``set()`` initializer makes an empty iterable a valid
    # input; a bare reduce() raises TypeError on it.
    return reduce(lambda acc, nxt: acc | nxt, sets, set())
def get_task_no(job_id):
    """Extract the task number from the full job/job step ID"""
    task_info = job_id.split('_')[1]
    if '.batch' in task_info:
        # Strip the ".batch" step suffix before conversion.
        task_no = task_info.split('.')[0]
    else:
        task_no = task_info
    return int(task_no)
import torch
def collate_batch(batch):
    """Merge a list of sample dicts into a single batch dict.

    Arguments:
        batch (list): list of dictionaries sharing the same keys, where
            each value is a tensor
    Returns:
        dict mapping each key to the concatenation of that key's tensors
        across all samples
    """
    merged = {}
    for key in batch[0]:
        merged[key] = torch.cat(tuple(sample[key] for sample in batch))
    return merged
def CorrectUpdateMask(ref, args, request):
    """Returns the update request with the corrected mask.

    The API expects an update mask of 'schema', but the inline schema
    argument generates 'schema.columns', so when --schema was given the
    mask must be rewritten.

    Args:
      ref: The entry resource reference (unused).
      args: The parsed args namespace.
      request: The update entry request.

    Returns:
      Request with corrected update mask.
    """
    del ref
    if args.IsSpecified('schema'):
        corrected = request.updateMask.replace('schema.columns', 'schema')
        request.updateMask = corrected
    return request
def binary_search_iterative(list_, p, r, element):
    """Iterative binary search.

    Parameters:
        list_ (list): sorted list of integers
        p (int): start index of the search range
        r (int): end index (inclusive) of the search range
        element (int): value to look for

    Returns:
        int: index of ``element`` inside the range, or -1 when it is
            absent (including when the list or the range is empty)
    """
    # Guard against an empty list or an inverted range, which previously
    # made the boundary check below raise IndexError.
    if not list_ or p > r:
        return -1
    # Fast reject when the element lies outside the range's value span.
    if element < list_[p] or element > list_[r]:
        return -1
    while p <= r:
        q = (p + r) // 2  # middle index of the current range
        if list_[q] == element:
            return q
        if list_[q] > element:
            r = q - 1
        else:
            p = q + 1
    return -1
def s(switch_num: int) -> str:
    """ Returns the switch name for a given switch number """
    return 's' + str(switch_num)
def dlv_strip(line):
    """Strips away the DLV title and stuff, if present."""
    # Header lines from the solver output carry no model content.
    if line.startswith(("Cost", "DLV")):
        return ""
    prefix = "Best model: "
    if line.startswith(prefix):
        line = line[len(prefix):]
    return line.strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.