content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def _get_train_steps(num_examples, train_epochs, train_batch_size):
"""Determine the number of training steps."""
return num_examples * train_epochs // train_batch_size + 1 | 8fe187059e2050f599fcec0d707a0c3fcb4f857e | 688,143 |
def make_synteny(genes, isoforms):
    """Count the distinct isoform groups covered by *genes*.

    Genes missing from *isoforms* all collapse into the single ``None`` group.
    """
    return len({isoforms.get(gene) for gene in genes})
import torch
def view_as_real(data):
    """Named-tensor aware wrapper around ``torch.view_as_real()``.

    Dimension names are stripped before taking the real view and restored
    afterwards, with a trailing ``"complex"`` dimension appended for the
    real/imaginary pair.
    """
    original_names = data.names
    unnamed_view = torch.view_as_real(data.rename(None))
    return unnamed_view.refine_names(*original_names, "complex")
def bitstring_readable(data, batch_size, model_output=None, whole_batch=False):
    """Produce a human readable representation of the sequences in data.

    Args:
        data: data to be visualised; must expose ``observations`` and
            ``target`` tensors indexable as ``[batch, time, feature]``.
        batch_size: size of batch
        model_output: optional model output tensor to visualize alongside data.
        whole_batch: whether to visualise the whole batch. Only the first sample
            will be visualized if False

    Returns:
        A string used to visualise the data batch
    """
    def _readable(datum):
        # One row per vector: zeros render as '-', anything else as its int value.
        return '+' + ' '.join(['-' if x == 0 else '%d' % x for x in datum]) + '+'

    obs_batch = data.observations
    targ_batch = data.target
    # Either every sample in the batch, or just the first one.
    iterate_over = range(batch_size) if whole_batch else range(1)
    batch_strings = []
    for batch_index in iterate_over:
        obs = obs_batch[batch_index, :, :]
        targ = targ_batch[batch_index, :, :]
        readable_obs = 'Observations:\n' + '\n'.join([_readable(obs_vector) for obs_vector in obs])
        readable_targ = 'Targets:\n' + '\n'.join([_readable(targ_vector) for targ_vector in targ])
        strings = [readable_obs, readable_targ]
        if model_output is not None:
            output = model_output[batch_index, :, :]
            strings.append('Model Output:\n' + '\n'.join([_readable(output_vec) for output_vec in output]))
        # Samples are separated by four newlines in the final output.
        batch_strings.append('\n\n'.join(strings))
    return '\n' + '\n\n\n\n'.join(batch_strings)
def parse_crs_string(string: str) -> str:
    """Classify a CRS/projection definition string.

    Args:
        string: text containing CRS/projection data.

    Returns:
        One of ``"epsg"``, ``"proj4"``, ``"wkt"`` or ``"string"``.
    """
    lowered = string.lower()
    if "epsg:" in lowered:
        return "epsg"
    if "+proj" in string:
        return "proj4"
    if "SPHEROID" in string:
        return "wkt"
    return "string"
def bytes_to_int(s):
    """Return the big-endian integer encoded by a byte string.

    Bug fix: the original used ``s.encode('hex')``, which only exists on
    Python 2 byte-strings; ``int.from_bytes`` is the Python 3 equivalent.

    Args:
        s: bytes (or str, which is encoded as Latin-1 so each character
           maps to the byte of the same ordinal, matching Py2 semantics)

    Returns:
        int: numeric interpretation of binary string ``s`` (0 for empty input)
    """
    if isinstance(s, str):
        # The Py2 original accepted byte-strings typed as str.
        s = s.encode("latin-1")
    return int.from_bytes(s, byteorder="big")
def make_space(space_padding=0):
    """Return a string of ``space_padding`` spaces. Defaults to 0.

    Uses string repetition instead of the original quadratic
    concatenation loop; negative counts yield '' just as before.
    """
    return ' ' * space_padding
def rename_bindnames(tqry, li_adjust):
    """Rewrite ``%(name)s`` bind placeholders in a query template.

    Each ``(bindname, attrname)`` pair in *li_adjust* replaces the placeholder
    ``%(bindname)s`` with ``%(attrname)s``, so predefined queries such as
    ``"select * from customers where custid = %(custid)s"`` can be reused with
    differently-keyed bind dictionaries (e.g. ``{"customer": "cust001"}``).
    """
    for old_name, new_name in li_adjust:
        tqry = tqry.replace("%(" + old_name + ")s", "%(" + new_name + ")s")
    return tqry
def _format(string):
""" Formats a class name correctly for checking function and class names.
Strips all non-alphanumeric chars and makes lowercase.
"""
return ''.join(list(filter(str.isalnum, string))).lower() | 0fbff1d0da8c3bd4b318613dfa039dcef664f11f | 688,160 |
def rldecode(A, n, axis=0):
    """
    Decompresses run length encoding of array A along axis.

    Synopsis:
        B = rldecode(A, n, axis)
        B = rldecode(A, n) # axis assumed to be 0

    Arguments:
        A (np.ndarray): Encoded array
        n (np.ndarray): Repetition of each layer along an axis.
        axis (Optional[int]): Axis of A where run length decoding is done.

    Returns:
        Uncompressed matrix

    Example (1D-array) along default axis:
        >>> A = np.array([1,4,5])
        >>> n = np.array([4,2,1])
        >>> print(rldecode(A, n))
        [1 1 1 1 4 4 5]

    Example (2D-array) along j-axis:
        >>> A = np.array([
        ...     [1,3,3,3],
        ...     [2,4,3,3],
        ...     [3,5,3,4],
        ...     [4,6,3,5]])
        >>> n = np.array([2,1,1,2])
        >>> print(rldecode(A, n, axis=1))
        [[1 1 3 3 3 3]
         [2 2 4 3 3 3]
         [3 3 5 3 4 4]
         [4 4 6 3 5 5]]
    """
    assert n.size > 0, "Length array was empty."
    # repeat functions take 1d array; accept row/column vectors by flattening.
    if n.ndim != 1:
        assert n.ndim <= 2
        assert n.shape[0] == 1 or n.shape[1] == 1
        n = n.ravel()
    # np.ndarray.repeat does the actual decoding: entry i repeats n[i] times.
    return A.repeat(n, axis=axis)
def open_and_read_file(file_path):
    """Read the entire contents of the file at *file_path* as a string.

    Uses a context manager so the handle is closed deterministically
    (the original left the file open until garbage collection).
    """
    with open(file_path) as file_obj:
        return file_obj.read()
def createFromDocument(doc):
    """
    Create an empty JS range from a document.

    @param doc: DOM document
    @return: an empty JS range, as produced by ``doc.createRange()``
    """
    return doc.createRange()
def _lsb_2fold(aa, bit):
"""
This function embeds a pair of bits in 2/3 fold degenerative codon.
:param aa: amino acid information.
:param bit: bit (character 2 e.g. 0) which should be embedded in codon.
:return: watermarked codon (string) e.g. AGA.
"""
if bit == '0':
return aa["codons"][0]
else:
return aa["codons"][1] | 9730ddb9f13d9d3fe1191d7fd0bc81172ee5cfcd | 688,186 |
def encoder_type(encode):
    """Map a user encoding-menu selection to the encoder name to use.

    Unknown selections yield ``"ERROR"``.
    """
    choices = {
        '0': "",
        '1': "shikata_ga_nai",
        '2': "",
        '3': "MULTIENCODE",
        '4': "BACKDOOR",
    }
    return choices.get(encode, "ERROR")
def bounding_box_circle(svg, node, font_size):
    """Bounding box for a circle node.

    Returns ``(min_x, min_y, width, height)`` computed from the circle's
    centre (``cx``/``cy``) and radius ``r``.  ``svg.point`` / ``svg.length``
    presumably resolve SVG units against *font_size* — confirm with the
    surrounding SVG renderer.
    """
    cx, cy = svg.point(node.get('cx'), node.get('cy'), font_size)
    r = svg.length(node.get('r'), font_size)
    return cx - r, cy - r, 2 * r, 2 * r
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
    """Merge segments A and B into one sequence with [CLS]/[SEP] markers.

    Segment A — including the leading [CLS] and its trailing [SEP] — gets
    token type 0; segment B and its trailing [SEP] get token type 1.  The
    closing [SEP] for B is only appended when B is non-empty.

    Returns:
        (tokens, tokentypes): two parallel lists of equal length.
    """
    tokens = [cls_id] + list(tokens_a) + [sep_id]
    tokentypes = [0] * len(tokens)
    if tokens_b:
        tokens += list(tokens_b) + [sep_id]
        tokentypes += [1] * (len(tokens_b) + 1)
    return tokens, tokentypes
def _get_parameters_proto(host_calls_dictionary):
    """Get the FormalParameterProtos for the first host call in the dictionary.

    Assumes ``host_calls_dictionary['host_calls']`` is a non-empty sequence of
    objects exposing a ``parameters`` attribute — confirm with callers.
    """
    return host_calls_dictionary['host_calls'][0].parameters
def sql_flush(style, connection, only_django=False):
    """
    Returns a list of the SQL statements used to flush the database.

    If only_django is True, then only table names that have associated Django
    models and are in INSTALLED_APPS will be included.
    """
    if only_django:
        tables = connection.introspection.django_table_names(only_existing=True)
    else:
        tables = connection.introspection.table_names()
    # Delegate statement generation to the backend's operations class; the
    # sequence list is passed so sequences are reset alongside the flush.
    statements = connection.ops.sql_flush(style, tables, connection.introspection.sequence_list())
    return statements
def lit_eq(lit1, lit2):
    """Return True when the two literals are syntactically equal."""
    return lit1 == lit2
def greedy_action(q, state):
    """
    Return the greedy (argmax) action for *state*.

    Ties are broken in favour of the lowest action index, matching a
    left-to-right scan of the action-value row.

    :param q: action-value table (2-D array-like with a ``shape`` attribute).
    :param state: current state index.
    :return: index of the highest-valued action.
    :rtype: int
    """
    best_action = 0
    best_value = q[state][best_action]
    for action in range(1, q.shape[1]):
        value = q[state][action]
        if value > best_value:
            best_action, best_value = action, value
    return best_action
import yaml
def load_config_file(path):
    """
    Load and parse a YAML file.

    Parameters:
        path (str): full yaml path location

    Returns:
        dict: yaml file parsed into a dict
    """
    # FullLoader resolves standard YAML tags; still, only feed it trusted
    # input (it is safer than the legacy default loader, not fully safe).
    with open(path) as file:
        return yaml.load(file, Loader=yaml.FullLoader)
def clean_nginx_git_tag(tag):
    """
    Return a cleaned ``version`` string from an nginx git tag.

    Nginx tags git releases as in ``release-1.2.3``; this removes the
    ``release-`` prefix when present.

    For example:
    >>> clean_nginx_git_tag("release-1.2.3") == "1.2.3"
    True
    >>> clean_nginx_git_tag("1.2.3") == "1.2.3"
    True
    """
    prefix = "release-"
    if tag.startswith(prefix):
        return tag[len(prefix):]
    return tag
def check_for_running_sfn(session, arn):
    """Check if a downsample step function is already running.

    Args:
        session (boto3.session): session used to build the stepfunctions client.
        arn (str): Step function arn.

    Returns:
        (bool): True when at least one RUNNING execution exists.
    """
    client = session.client('stepfunctions')
    # maxResults=1: we only need to know whether any execution is running.
    resp = client.list_executions(stateMachineArn=arn, statusFilter='RUNNING', maxResults=1)
    return 'executions' in resp and len(resp['executions']) > 0
def has_usable_review_ui(user, review_request, file_attachment):
    """Returns whether a review UI is set and can be used.

    Note: the truthiness check on ``review_ui`` short-circuits ``and``, so
    when no review UI is set the returned value is that falsy object (e.g.
    ``None``) rather than ``False``.
    """
    review_ui = file_attachment.review_ui
    return (review_ui and
            review_ui.is_enabled_for(user=user,
                                     review_request=review_request,
                                     file_attachment=file_attachment))
def to_binary(number: int) -> str:
    """Convert an integer to its binary representation (no ``0b`` prefix).

    Bug fix: the original ``bin(number)[2:]`` produced garbage such as
    ``'b101'`` for negative inputs (``bin(-5)`` is ``'-0b101'``);
    ``format`` keeps the sign in the right place.

    :param number: The number to convert to binary
    :return: The binary representation, e.g. ``'101'`` or ``'-101'``
    """
    return format(number, 'b')
import base64
def base64_encode(string):
    """
    URL-safe base64-encode *string* (bytes) without ``=`` padding.

    ``base64.urlsafe_b64encode`` pads with ``=``, which is not URL safe
    when used in URL parameters, so trailing padding bytes are stripped.
    """
    padded = base64.urlsafe_b64encode(string)
    return padded.rstrip(b"=")
def handler(value, **kwargs):
    """Split the supplied string on the given delimiter, providing a list.

    Format of value:

        <delimiter>::<value>

    For example ``Subnets: ${split ,::subnet-1,subnet-2,subnet-3}`` results
    in ``Subnets`` getting ``["subnet-1", "subnet-2", "subnet-3"]``.

    This is particularly useful when getting an output from another stack
    that contains a list — e.g. the standard vpc blueprint outputs its
    subnets as comma-separated Outputs (PublicSubnets, PrivateSubnets):

        Subnets: ${split ,::${output vpc::PrivateSubnets}}
    """
    parts = value.split("::", 1)
    if len(parts) != 2:
        raise ValueError("Invalid value for split: %s. Must be in "
                         "<delimiter>::<text> format." % value)
    delimiter, text = parts
    return text.split(delimiter)
def replace_string_in_list(str_list: list, original_str: str, target_str: str):
    """
    Return *str_list* with every occurrence of a substring replaced.

    Args:
        str_list (list): list of strings to process.
        original_str (str): substring to be replaced.
        target_str (str): replacement substring.

    Returns:
        list: a new list with the substitution applied to each element.
    """
    replaced = []
    for item in str_list:
        replaced.append(item.replace(original_str, target_str))
    return replaced
def _wait_before_serving(seconds):
    """Tell the server not to write to this socket for the specified time.

    Returns a callback ``(ps, soc)`` that asks *ps* to delay writes on
    *soc*; the delay is converted from seconds to milliseconds.
    """
    def _helper(ps, soc):
        ps.delay_writing_for(seconds * 1000, soc)
    return _helper
def test_stability(v1, v2, precision=10e-3):
"""tests if two lists of lists of floats are equal but a certain precision
Args:
v1 (list[list[float]]): first list containing ints
v2 (list[list[float]]): second list containing ints
precision (float, optional): the precision after which v1 and v2 are not equal
Returns:
bool: True if the two lists are close enought, False otherwise
"""
v1 = [x for y in v1 for x in y]
v2 = [x for y in v2 for x in y]
for x1, x2 in zip(v1, v2):
if abs(x2 - x1) > precision:
return False
return True | 07b12bd7255a7f88cff630b3b6f79513752b2cb3 | 688,228 |
def cast_elements_to_string(cast_list):
    """Cast the top-level elements of a list to strings.

    Nested lists are not flattened first, so inner lists are themselves
    stringified wholesale; apply ``flatten_list()`` beforehand to change
    that behavior.

    Raises:
        TypeError: if *cast_list* is not a list.
    """
    if not isinstance(cast_list, list):
        raise TypeError('cast_elements_to_string() must be passed a list!')
    return [str(element) for element in cast_list]
import torch
def test_observe_get_and_verify_response_input_unit(tmp_observe_class, method, tmp_val, monkeypatch):
    """
    Test that _get_and_verify_response_input works for
    self.sampling["method"] in {"iterative", "functions"}.  Leverages
    monkeypatching and a fake class to mock that greattunes._observe will be
    called inside the TuneSession class in greattunes.__init__.  Relies on
    manual input for the "iterative" option.
    """
    # # define class
    cls = tmp_observe_class
    cls.sampling["method"] = method

    # monkeypatch the "support" functions _get_response_function_input, _read_response_manual_input
    def mock_get_response_function_input():
        return torch.tensor([[tmp_val]], dtype=torch.double,
                            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    monkeypatch.setattr(
        cls, "_get_response_function_input", mock_get_response_function_input
    )

    # the manual path returns a distinct value so the two branches can be
    # told apart in the assertions below
    manual_tmp_val = tmp_val + 1.0

    def mock_read_response_manual_input(additional_text):
        return torch.tensor([[manual_tmp_val]], dtype=torch.double,
                            device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
    monkeypatch.setattr(
        cls, "_read_response_manual_input", mock_read_response_manual_input
    )

    # set kwarg response to None (so manually provided input is used)
    kwarg_response = None

    # run test
    output = cls._get_and_verify_response_input(response=kwarg_response)
    if method == "functions":
        assert output[0].item() == tmp_val
    elif method == "iterative":
        assert output[0].item() == manual_tmp_val
def no_walk_revctrl(dirname=''):
    """Return ``['']`` to disable revision-control file walking.

    Despite the original one-line summary ("return empty list"), the list
    is deliberately non-empty — see the comment below.
    """
    # Returning a non-empty list prevents egg_info from reading the
    # existing SOURCES.txt
    return ['']
def get_rm(g):
    """Return membrane resistivity in Ohm*m^2.

    g -- membrane conductivity in S/m^2 (must be nonzero; raises
         ZeroDivisionError otherwise)
    """
    return 1/g
def load_md(path: str) -> list:
    """
    Load an existing text file into a list of lines.

    :param path: path where the file is stored
    :return: list with the lines from the file (trailing newlines preserved)
    """
    with open(path, "r", encoding="UTF-8") as md_file:
        lines = md_file.readlines()
    return lines
def clean_name(name):
    """
    Clean a proposed character name: keep letters only, then title-case.
    """
    letters_only = [ch for ch in name if ch.isalpha()]
    return ''.join(letters_only).title()
def merge_dicts(*dicts, **kwargs):
    """Merge all dicts in *dicts* into a single dict and return the result.

    When two dicts define the same key, the later dict wins.  If any entry
    in *dicts* is None and ``default`` is given as a keyword argument,
    return ``default`` instead.
    """
    merged = {}
    for current in dicts:
        if current is None and "default" in kwargs:
            return kwargs["default"]
        if current:
            merged.update(current)
    return merged
def calc_median(values_list):
    """Calculate the median in O(n log n); also return the sorted list.

    Bug fix: for even-length input the original averaged the elements at
    indices ``half`` and ``half + 1``; the true median averages the two
    middle elements at ``half - 1`` and ``half`` (the old code also
    crashed with IndexError for n == 2).

    Returns:
        (median, sorted_list, n): median (0.0 for empty input), a sorted
        copy of the input, and its length.
    """
    sorted_list = sorted(values_list)
    n = len(sorted_list)
    if n == 0:
        return 0.0, sorted_list, n
    half = n >> 1
    if n % 2 == 1:
        median = sorted_list[half]
    else:
        median = 0.5 * (sorted_list[half - 1] + sorted_list[half])
    return median, sorted_list, n
import json
def json_pp(json_object):
    """
    Convert an object into a pretty-printed JSON string.

    :param json_object: the object to be serialised
    :return: a sorted, 4-space-indented JSON string
    """
    return json.dumps(
        json_object,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
import string
def flow_key(flow):
    """Model a flow key string for ``ovs-ofctl``.

    Syntax taken from ``ovs-ofctl`` manpages:
    http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-ofctl.8

    Example flow dictionary:
        flow = {
            'in_port': '1',
            'idle_timeout': '0',
            'actions': ['output:3']
        }

    :param flow: Flow description as a dictionary
    :return: String
    :rtype: str
    """
    # Two templates: with actions (add/mod key) and without (delete key).
    _flow_add_key = string.Template('${fields},action=${actions}')
    _flow_del_key = string.Template('${fields}')
    field_params = []
    # Everything except 'actions' becomes a comma-separated field=value list.
    user_params = (x for x in list(flow.items()) if x[0] != 'actions')
    for (key, default) in user_params:
        field_params.append('%(field)s=%(value)s' %
                            {'field': key, 'value': default})
    field_params_str = ','.join(field_params)
    _flow_key_param = {
        'fields': field_params_str,
    }
    # no actions == delete key
    if 'actions' in flow:
        _flow_key_param['actions'] = ','.join(flow['actions'])
        flow_str = _flow_add_key.substitute(_flow_key_param)
    else:
        flow_str = _flow_del_key.substitute(_flow_key_param)
    return flow_str
def cleanup_decorator(func):
    """Decorator which runs ``self.cleanup()`` before and after *func*.

    The wrapped function's return value is passed through unchanged.
    ``functools.wraps`` preserves the decorated function's name and
    docstring (the original wrapper hid them, which breaks introspection
    and confuses debuggers).
    """
    import functools

    @functools.wraps(func)
    def clean_before_after(self, *args, **kwargs):
        self.cleanup()
        result = func(self, *args, **kwargs)
        self.cleanup()
        return result
    return clean_before_after
def formatUintHex64(value):
    """
    Format a 64-bit unsigned integer as ``0x``-prefixed, zero-padded hex.
    """
    return u"0x{:016x}".format(value)
def get_account_id(sts_client):
    """Retrieve the AWS account ID for the authenticated user or role.

    Args:
        sts_client: a boto3 STS client.

    Returns:
        The ``Account`` field from the GetCallerIdentity response.
    """
    response = sts_client.get_caller_identity()
    return response['Account']
def bull_engulf(Open, high, low, close, t=4):
    """
    Identifies whether prices form a Bullish Engulfing Pattern.

    Param:
        Open: array of open prices (5-day)
        high: array of high prices (5-day; currently unused)
        low: array of low prices (5-day; currently unused)
        close: array of close prices (5-day)
        t: int num. day -1 (5-1=4)
    Return:
        status: boolean true if it is the pattern
    Raises:
        AttributeError: if fewer than 5 open prices are supplied.
    """
    if len(Open) < 5:
        raise AttributeError('Prices are not length 5')
    # Day t opens below the prior close and closes above the prior open
    # (engulfing), day t is bullish while day t-1 was bearish, the three
    # preceding days were all bearish, and their closes were declining.
    if (Open[t] < close[t-1] and
            close[t] > Open[t-1] and
            Open[t] < close[t] and
            Open[t-1] > close[t-1] and
            (Open[t-2] > close[t-2] and Open[t-3] > close[t-3] and Open[t-4] > close[t-4]) and
            (close[t-2] < close[t-3] < close[t-4])):
        return True
    return False
def _fit_one_ovo(bin_clf_idx, multi_ovo, dataset, verbose):
    """Fit the OVO classifier given an index.

    This method fits a one-vs-one classifier wrt the
    positive and negative labels taken from the list
    clf_pair_idx at the index bin_clf_idx.

    Parameters
    ----------
    bin_clf_idx : int
        Index of the binary classifier
    multi_ovo : CClassifierMulticlassOVO
        Instance of the multiclass OVO classifier.
    dataset : CDataset
        Training set. Must be a :class:`.CDataset` instance with
        patterns data and corresponding labels.
    verbose : int
        Verbosity level of the logger.

    Returns
    -------
    The fitted binary classifier instance.
    """
    # Resetting verbosity level. This is needed as objects
    # change id when passed to subprocesses and our logging
    # level is stored per-object looking to id
    multi_ovo.verbose = verbose

    # Take the classes indices for this positive/negative pair
    tr_class_idx = multi_ovo._clf_pair_idx[bin_clf_idx][0]
    vs_class_idx = multi_ovo._clf_pair_idx[bin_clf_idx][1]
    multi_ovo.logger.info(
        "Training class {:} against class: {:}".format(
            tr_class_idx, vs_class_idx))

    # Create the binarized training dataset for this pair
    train_ds = multi_ovo.binarize_subset(tr_class_idx, vs_class_idx, dataset)

    # Extracting the internal classifier
    classifier_instance = multi_ovo._binary_classifiers[bin_clf_idx]
    # Setting verbosity level
    classifier_instance.verbose = multi_ovo.verbose
    # Training the one-vs-one classifier
    classifier_instance.fit(train_ds.X, train_ds.Y)

    return classifier_instance
def get2dgridsize(sz, tpb=(8, 8)):
    """Return CUDA launch dimensions for a 2-D array.

    :param sz: input array size (rows, cols)
    :param tpb: (optional) threads per block
    :return: ((blocks_y, blocks_x), tpb), blocks computed by ceiling division
    """
    bpg = tuple(-(-dim // threads) for dim, threads in zip(sz, tpb))
    return bpg, tpb
import torch
def pitchVocabularyFmt(X, vocab_col):
    """
    Produce the tensors for training with a pitch-vocabulary encoding.

    Returns:
        (pitch, score_feats): integer pitch indices taken from column
        *vocab_col* of X, and all remaining columns as float features.
    """
    pitch = torch.tensor(X[:, vocab_col], dtype=torch.long)
    left = torch.tensor(X[:, :vocab_col], dtype=torch.float)
    right = torch.tensor(X[:, vocab_col + 1:], dtype=torch.float)
    score_feats = torch.cat([left, right], dim=1)
    return pitch, score_feats
def IsSimulator(target_cpu):
    """Return whether *target_cpu* corresponds to a simulator build.

    Any CPU string not starting with 'arm' is treated as a simulator target.
    """
    return target_cpu[:3] != 'arm'
import socket
import struct
def ipv6_to_long(ip):
    """Return the IPv6 address string as an integer.

    (Doctest outputs updated from the Python 2 ``L``-suffixed long form.)

    >>> ipv6_to_long("2001:db8::1")
    42540766411282592856903984951653826561
    >>> ipv6_to_long("::1")
    1
    """
    # inet_pton yields the 16-byte packed address; unpack it as two
    # big-endian 64-bit halves and recombine them.
    ip_bytes_n = socket.inet_pton(socket.AF_INET6, ip)
    ip_parts = struct.unpack('!QQ', ip_bytes_n)
    return 2**64 * ip_parts[0] + ip_parts[1]
import functools
def join(*expressions):
    """Join many expressions in series using ``ObserverExpression.then``.

    Parameters
    ----------
    *expressions : iterable of ObserverExpression

    Returns
    -------
    new_expression : ObserverExpression
        Joined expression.
    """
    def _chain(left, right):
        return left.then(right)

    return functools.reduce(_chain, expressions)
def ssh_auth(username, address):
    """Render the ``user@host`` part of an ssh target.

    Falls back to just the address when *username* is falsy.
    """
    if not username:
        return '{}'.format(address)
    return '{}@{}'.format(username, address)
def round_int(value):
    """Cast *value* to the nearest integer.

    Floats go through ``round()`` (half-to-even); anything else is cast
    with ``int()`` directly.
    """
    if isinstance(value, float):
        value = round(value)
    return int(value)
def accel_within_limits(v, a, v_range):
    """
    Accelerate the car while clipping to a velocity range.

    Args:
        v (int): starting velocity
        a (int): acceleration
        v_range (tuple): (min, max) velocity

    Returns:
        (int): new velocity, clipped to the min/max of v_range
    """
    lo, hi = v_range
    return min(max(v + a, lo), hi)
from typing import Iterable
def check_type(data):
    """
    Return False for non-string iterables (lists, dicts, ...), True otherwise.

    Strings are iterable but are deliberately treated as scalar values here.
    """
    if isinstance(data, str):
        return True
    return not isinstance(data, Iterable)
def get_ap_vel(df, time_step, scaling_factor):  # Calculates 'angular persistence', 'velocity', and 'directed velocity'
    """
    Primary function called by "get_chemotaxis_stats" and
    "get_chemotaxis_stats_by_interval". Calculates the 'Angular_persistence',
    'Velocity', and 'Directed_velocity' for each timepoint of each unique cell.

    Parameters
    ----------
    df: DataFrame
        Typically supplied by the calling function. Must include columns labeled
        'x' and 'y' (series of floats, in units of pixels) plus
        'x_from_center' and 'y_from_center' (used below even though the
        original docstring omitted them).
    time_step: integer
        Duration of the interval between each timepoint of a cell track.
    scaling_factor: float
        Factor for conversion of 'x' and 'y' from pixels to real units of
        length. IMPORTANT: keep real units consistent across the pipeline.

    Returns
    -------
    df: DataFrame
        The input DataFrame — modified in place — with added 'Velocity',
        'Angular_persistence', and 'Directed_velocity' columns.
    """
    # Frame-to-frame displacements of both raw and centred coordinates.
    diff_df = df[['x', 'y', 'x_from_center', 'y_from_center']].diff()
    # Cosine of the angle between the position vector (from center) and the
    # displacement vector, via dot product over magnitudes; multiplied by -1
    # — presumably so motion toward the chemoattractant scores positive;
    # confirm against the calling pipeline.
    dot_product = df['x_from_center'] * diff_df['x_from_center'] + df['y_from_center'] * diff_df['y_from_center']
    magnitude = (df['x_from_center']**2 + df['y_from_center']**2)**0.5 * (diff_df['x_from_center']**2 + diff_df['y_from_center']**2)**0.5
    df['Angular_persistence'] = dot_product / magnitude * -1
    df['Velocity'] = (diff_df['x']**2 + diff_df['y']**2)**0.5 * scaling_factor / time_step
    df['Directed_velocity'] = df['Velocity'] * df['Angular_persistence']
    return df
def _num_tokens_of(rule):
"""Calculate the total number of tokens in a rule."""
total = len(rule.get("tokens"))
for _ in ("prev_classes", "prev_tokens", "next_tokens", "next_classes"):
val = rule.get(_)
if val:
total += len(val)
return total | eafebe556ee28fe1ab4699894266b47b4c1ed63b | 688,301 |
def sort_dataframe(dataframe, sort_column, order='ascending', nulls_position='last', inplace=True):
    """
    Sort the dataframe by the sort column.

    Arguments are the dataframe and column that you want to sort by.
    Optional arguments are:
    - order (default ascending, can be ascending or descending) which determines
      whether to sort by the column ascending or descending (any value other
      than 'ascending' sorts descending)
    - nulls_position (default last, can be first or last) which determines
      whether null values (NaN) are sorted first or last
    - inplace (default True, can be True or False) which determines whether
      to change the existing dataframe, or return a new dataframe

    Note: when inplace is True, ``DataFrame.sort_values`` returns None and
    this function implicitly returns None as well.
    """
    if order == 'ascending':
        ascending = True
    else:
        ascending = False
    df = dataframe.sort_values(sort_column, ascending=ascending, na_position=nulls_position, inplace=inplace)
    if not inplace:
        return df
def job_id() -> str:
    """Return a fixed, all-zero placeholder job ID in UUID format."""
    return "00000000-0000-0000-0000-000000000000"
import re
def _split_text_by_opening(pattern, text):
"""
Splits text into parts identified by opening that matches `pattern`.
For example, --pattern='\n\nCHAPTER \\d+\n\n' may be used
to split text into chapters.
"""
openings = re.findall(pattern, text)
if len(openings) == 0:
print(f'\n❗ No text matching pattern "{pattern}". Splitting is not performed.\n')
return []
texts = re.split(pattern, text)
texts = [d + t for d, t in zip(openings, texts[1:])]
return texts | 2082f62b35b96173159bab0d8935ac849bb14d42 | 688,309 |
import json
def format_json(data, default=None):
    """
    Pretty-print JSON.

    Arguments:
        data (dict): JSON blob.
        default: optional serializer callback for non-JSON-native objects.

    Returns:
        str: formatted JSON with sorted keys and 2-space indent.
    """
    options = {
        "sort_keys": True,
        "indent": 2,
        "separators": (",", ": "),
        "default": default,
    }
    return json.dumps(data, **options)
def oo_split(string, separator=','):
    """Split *string* into a list on *separator*.

    If the input is already a list it is returned unchanged (same object).
    """
    return string if isinstance(string, list) else string.split(separator)
from functools import reduce
import operator
def MergeDicts(*dicts):
    """Construct a merged dictionary from the given dicts.

    If two dicts define the same key, the value from the dict later in the
    list is chosen.

    Bug fix: the original ``reduce(operator.add, map(dict.items, dicts))``
    raises TypeError on Python 3, where ``dict_items`` views do not support
    ``+``; plain ``dict.update`` calls give the same later-wins semantics.
    """
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
def get_label_color(status):
    """
    Get the customized display color for a status.

    :param status: the status to look up (KeyError if unknown)
    :return: color name string
    """
    return {
        'NEW': 'grey',
        'ASSIGNED': 'blue',
        'OPEN': 'orange',
        'FIXED': 'purple',
        'RETEST': 'cyan',
        'REOPENED': 'orange',
        'VERIFIED': 'green',
        'BLOCKED': 'red',
        'CLOSED': 'black',
    }[status]
def remove_duplicates(seq):
    """
    Remove duplicates from a list while preserving order.

    Benchmarked as the fastest approach, source:
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Example:
        >>> remove_duplicates(['a', 'a', 'b', 'c', 'a'])
        ['a', 'b', 'c']
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
import torch
def compute_bboxes_from_keypoints(keypoints):
    """
    keypoints: B x 68*2
    return value: B x 4 (t, b, l, r)

    Compute a very rough bounding box approximate from 68 keypoints.
    """
    # Reshape to (B, 68, 2), then transpose to (2, 68, B) so x and y are
    # each (68, B) coordinate planes.
    x, y = keypoints.float().view(-1, 68, 2).transpose(0, 2)
    # Indices 27 and 8 are treated as top-of-face and chin landmarks —
    # presumably the 68-point facial-landmark convention; confirm against
    # the keypoint layout used upstream.
    face_height = y[8] - y[27]
    # Expand the box below the chin (0.2x) and above index 27 (0.47x).
    b = y[8] + face_height * 0.2
    t = y[27] - face_height * 0.47
    # Make the box square: centre horizontally on the x-extent and give it
    # half the vertical height on each side.
    midpoint_x = (x.min(dim=0)[0] + x.max(dim=0)[0]) / 2
    half_height = (b - t) * 0.5
    l = midpoint_x - half_height
    r = midpoint_x + half_height
    return torch.stack([t, b, l, r], dim=1)
def write_matlabbatch(template, nii_file, tpm_file, darteltpm_file, outfile):
    """ Complete matlab batch from template.

    Parameters
    ----------
    template: str
        path to the template batch to be completed.
    nii_file: list of str
        the Nifti image paths to be processed.
    tpm_file: str
        path to the SPM TPM file.
    darteltpm_file: str
        path to the CAT12 template file.
    outfile: str
        path to the generated matlab batch file that can be used to launch
        CAT12 VBM preprocessing.

    Returns
    -------
    int: 0 (kept for backward compatibility with existing callers).
    """
    # Build the quoted, newline-separated file list the template expects;
    # join replaces the original quadratic string concatenation.
    nii_file_str = "".join("'{0}' \n".format(path) for path in nii_file)
    with open(template, "r") as template_fh:
        stream = template_fh.read()
    stream = stream.format(anat_file=nii_file_str, tpm_file=tpm_file,
                           darteltpm_file=darteltpm_file)
    with open(outfile, "w") as out_fh:
        out_fh.write(stream)
    return 0
import html
import re
def reddit_sanitize(text):
    """
    Convert comments in the Reddit API format to actual plain-text likely
    constructed by the individual who posted it. HTML is unescaped, markup
    is removed, and quotes are removed.
    """
    # Unescape HTML entities (e.g. '&gt;' becomes '>')
    text = html.unescape(text)

    # Remove markdown markup, keeping the enclosed text (group 1).
    enclosed_text_regexes = [
        re.compile(r"\*\*(\S+[^*]*\S+|\S)\*\*"),   # Bold
        re.compile(r"\*(\S+[^*]*\S+|\S)\*"),       # Italic
        re.compile(r"_(\S+[^_]*\S+|\S)_"),         # Underline
        re.compile(r"\~\~(\S+[^\~]*\S+|\S)\~\~"),  # Strikethrough
        re.compile(r"\>\!(\S+[^(!<)]*\S+|\S)\!\<"),  # Spoilers
        re.compile(r"\^(\S+)"),                    # Superscript
        re.compile(r"\[([^\]]*)\]\([^\)]+\)"),     # Links, remove link but keep text.
    ]
    for rgx in enclosed_text_regexes:
        text = re.sub(rgx, r"\1", text)

    # Remove quoted ('>' prefix) and preformatted (4-space or tab indent) lines
    quote_filter_pred = lambda line: len(line) <= 0 or line[0] != ">"
    pref_filter_pred = lambda line: ((len(line) <= 4 or line[:4] != "    ") and
                                     (len(line) <= 0 or line[0] != "\t"))
    lines = text.split("\n")
    return "\n".join([x for x in lines if quote_filter_pred(x) and pref_filter_pred(x)])
def config2object(config):
    """
    Convert dictionary into instance allowing access to dictionary keys using
    dot notation (attributes).
    """
    class ConfigObject(dict):
        """
        Represents configuration options' group, works like a dict
        """
        def __init__(self, *args, **kwargs):
            dict.__init__(self, *args, **kwargs)

        def __getattr__(self, name):
            # NOTE(review): missing keys raise KeyError here, not the
            # AttributeError attribute access normally produces.
            return self[name]

        def __setattr__(self, name, val):
            self[name] = val

    if isinstance(config, dict):
        result = ConfigObject()
        # Recurse so nested dicts also support attribute access.
        for key in config:
            result[key] = config2object(config[key])
        return result
    else:
        # Non-dict leaves are returned unchanged (lists are not recursed into).
        return config
import socket
def _get_available_ports(n: int) -> list[int]:
"""
Get available ports.
Parameters
----------
n : int
number of ports to get.
Returns
-------
list[int]
Available ports.
"""
socks: list[socket.socket] = [socket.socket() for _ in range(n)]
list(map(lambda sock: sock.bind(("", 0)), socks))
ports: list[int] = [int(sock.getsockname()[1]) for sock in socks]
for sock in socks:
sock.close()
return ports | 64e4f6f0683ff7df34a2e264c0e22d2d3a7414ec | 688,355 |
def mean(sequence):
    """
    Return the arithmetic mean of a non-empty sized sequence of numbers.

    Raises:
        ZeroDivisionError: if the sequence is empty.
    """
    # Python 3 '/' is already true division, so the old float() cast
    # (a Python 2 remnant) is unnecessary; dropping it also lets types
    # like Decimal/Fraction divide natively.
    return sum(sequence) / len(sequence)
import textwrap
def proteins_to_fasta(proteins, seqids=None, use_safe_seqid=False, width=50):
    """
    Takes a proteins dictionary and returns a string containing
    all the sequences in FASTA format.

    Args:
        proteins: mapping of seqid -> record dict with at least the keys
            'seq', 'name' and (when use_safe_seqid is set) 'safe_seqid'.
        seqids: optional list of seqids to output; all proteins when
            empty/None.
        use_safe_seqid: use the record's 'safe_seqid' instead of 'name'
            as the FASTA header.
        width: maximum sequence line width.

    Returns:
        One FASTA entry per selected seqid, concatenated.
    """
    # None default avoids the mutable-default-argument pitfall; falsy
    # (None or []) still means "all proteins", as before.
    idlist = seqids if seqids else proteins
    entries = []
    for seqid in idlist:
        record = proteins[seqid]
        header = record['safe_seqid'] if use_safe_seqid else record['name']
        seq_wrap = textwrap.fill(record['seq'], width)
        entries.append(">%s\n%s\n" % (header, seq_wrap))
    # join() is linear; repeated '+=' on str is potentially quadratic.
    return "".join(entries)
def lowercase(data):
    """Recursively lowercase strings.

    Strings are lowercased; lists/tuples are processed element-wise (both
    become lists) and dicts value-wise (keys are left untouched).

    Args:
        data (list,str): Data to lowercase (either a string or a list
            [of lists..] of strings)

    Returns:
        list, str: Lowercased data

    Raises:
        ValueError: when a non-str, non-container value is encountered.
    """
    if isinstance(data, str):
        return data.lower()
    if isinstance(data, (list, tuple)):
        return [lowercase(element) for element in data]
    if isinstance(data, dict):
        return {key: lowercase(value) for key, value in data.items()}
    raise ValueError("Can only lowercase strings or lists of strings")
def overlay_image(foreground_image, mask, background_image):
    """ Alpha-blend the foreground onto the background using ``mask``
    :param foreground_image: foreground image points
    :param mask: [0-255] per-pixel opacity (255 = fully foreground)
    :param background_image: background image points; modified in place
    :returns: background image with foreground where mask > 0 overlaid
    """
    # Normalise the mask to [0, 1] and give it a trailing channel axis so
    # it broadcasts over the colour channels.
    alpha = (mask / 255).reshape(background_image.shape[0], background_image.shape[1], 1)
    blended = background_image[..., :3] * (1 - alpha) + foreground_image[..., :3] * alpha
    background_image[..., :3] = blended
    return background_image
def eq_or_in(val, options):
    """True when ``options`` is a tuple containing ``val``, or when
    ``options`` is a single value equal to ``val``."""
    if isinstance(options, tuple):
        return val in options
    return val == options
from typing import Counter
def get_bow(tokenized_text):
    """
    Build per-response bags-of-words and a corpus-wide word-frequency count.

    Also prints a short summary (vocabulary size and the ten most frequent
    words) as a side effect.

    -----PARAMETER-----
    tokenized_text should be in the form of [['a'], ['a', 'b'], ['b']] format,
    where the object is a list of survey response, with each survey response
    as a list of word tokens
    -----OUTPUT-----
    The function returns two objects
    bow_list: a list of Counter objects with word frequency of each response
    word_freq: a Counter object that summarizes the word frequency of the input
    tokenized_text
    """
    bow_list = []
    word_freq = Counter()
    for text in tokenized_text:
        # One Counter per response; the running corpus-wide total is
        # updated with the same tokens.
        bow = Counter(text)
        word_freq.update(text)
        bow_list.append(bow)
    print(f"This corpus has {len(word_freq.keys())} key words, and the 10 \
most frequent words are: {word_freq.most_common(10)}")
    return bow_list, word_freq
def is_same_py_file(file_1, file_2):
    """Compares 2 filenames accounting for .pyc files."""
    def _strip_compiled(name):
        # Treat compiled artifacts (.pyc / .pyo) as their source .py file.
        if name.endswith(('.pyc', '.pyo')):
            return name[:-1]
        return name
    return _strip_compiled(file_1) == _strip_compiled(file_2)
import torch
def loglikelihood(w, weights=None):
    """
    Calculates the estimated loglikelihood given log weights.
    :param w: The log weights, corresponding to likelihood
    :type w: torch.Tensor
    :param weights: Optional importance weights for the log-likelihood.
    :type weights: torch.Tensor
    :return: The log-likelihood
    :rtype: torch.Tensor
    """
    maxw, _ = w.max(-1)
    # Log-sum-exp trick: subtract the per-row maximum before exponentiating
    # for numerical stability, then add it back at the end.
    shift = maxw.unsqueeze(-1) if maxw.dim() > 0 else maxw
    scaled = torch.exp(w - shift)
    if weights is None:
        # Unweighted: plain mean over the last dimension.
        second_term = scaled.mean(-1).log()
    else:
        # Weighted: weights act as normalised importance weights.
        second_term = (weights * scaled).sum(-1).log()
    return maxw + second_term
def resolve_crop(im, crop):
    """Convert a crop (i.e. slice definition) to only positive values.

    crops might contain None (meaning "from the start" / "to the end") or
    negative values (counting from the end, as in Python slicing).

    :param im: image whose shape resolves None/negative entries
    :param crop: 2x2 nested sequence of slice bounds
    :return: a NEW 2x2 list of lists with resolved non-negative bounds
    """
    # Deep-copy the pairs: list(crop) alone shares (and would mutate) the
    # caller's inner two-element lists.
    crop = [list(pair) for pair in crop]
    # only works for two dimensions
    assert len(crop) == 2
    for i in (0, 1):
        assert len(crop[i]) == 2
        for j in (0, 1):
            if crop[i][j] is None:
                crop[i][j] = j * im.shape[1-j]
            elif crop[i][j] < 0:
                crop[i][j] += im.shape[1-j]
    return crop
def normalize_spaces(s):
    """Collapse every run of whitespace into a single space; leading and
    trailing whitespace is removed as a side effect of the split."""
    tokens = s.split()
    return ' '.join(tokens)
def checksum(digits):
    """
    Returns the checksum of CPF digits.

    References to the algorithm:
    https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
    https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
    """
    # Weights run from len(digits)+1 down to 2, paired positionally.
    weighted_sum = sum(
        digit * weight
        for digit, weight in zip(digits, range(len(digits) + 1, 1, -1)))
    remainder = weighted_sum % 11
    # Remainders 0 and 1 map to a 0 check digit; otherwise 11 - remainder.
    return 0 if remainder < 2 else 11 - remainder
def find_rlc(p_utility, q_utility, r_set, l_set, c_set):
    """
    Proportional controllers for adjusting the resistance and capacitance
    values in the RLC load bank.

    :param p_utility: utility/source active power in watts
    :param q_utility: utility/source reactive power in var
    :param r_set: prior resistor % change
    :param l_set: prior inductor % change
    :param c_set: prior capacitor % change
    :return: (resistor %, inductor % (unchanged), capacitor %)
    """
    # Apply only half of the computed correction each call for stability.
    damping = 0.50
    # Plant gains: var-per-%-capacitance and watt-per-%-resistance.
    new_cap = c_set + (6./1300. * q_utility) * damping
    new_res = r_set + (50.5/11700. * p_utility) * damping
    return new_res, l_set, new_cap
def score1(rule, c=0):
    """
    Confidence-based candidate score: rule support over (smoothed) body support.

    Parameters:
        rule (dict): rule from rules_dict; must provide "rule_supp" and "body_supp"
        c (int): additive smoothing constant for the denominator

    Returns:
        score (float): candidate score
    """
    return rule["rule_supp"] / (rule["body_supp"] + c)
import logging
import json
def extract_english_corpus(json_str, verbose=False):
    """A helper function to extract English corpus from KPTimes dataset in json

    :param: json_str: the json string
    :param: verbose: bool, if logging the process of data processing
    :returns: the articles and keywords for each article
    :rtype: src (list of string), tgt (list of keyword list)
    """
    src = []
    tgt = []
    for idx, line in enumerate(json_str):
        if verbose and idx % 1000 == 0:
            # Lazy %-style formatting: the original passed `idx` as a
            # positional arg to a message with no placeholder, which
            # triggers a formatting error inside the logging machinery.
            logging.info('processing idx: %d', idx)
        data = json.loads(line)
        src.append(data['abstract'])
        # Keywords are stored as a single ';'-separated string.
        tgt.append(data['keyword'].split(';'))
    return src, tgt
import re
def humansorted_datasets(l, key=None):
    """Sort a list of datasets according to a key of a dataset.

    Parameters
    ----------
    l : list
        The list of datasets to be sorted
    key : str (optional)
        The dataset key to sort by. Defaults to 'name'.

    Returns
    -------
    list
        The datasets in natural ("human") sort order.
    """
    sort_key = key or 'name'
    def natural_key(dataset):
        # Split the value into digit and non-digit runs; digit runs are
        # compared numerically so e.g. "a2" sorts before "a10".
        parts = re.split(r'(\d+)', dataset[sort_key])
        return [int(part) if part.isdigit() else part for part in parts]
    return sorted(l, key=natural_key)
def sum_multiples_three_five(number):
    """
    number: random integer
    return: the sum of all multipliers of 3 and 5 below number
    """
    # Sum a generator instead of accumulating a list in a while loop.
    return sum(n for n in range(number) if n % 3 == 0 or n % 5 == 0)
def asURL(epsg):
    """ convert EPSG code to OGC URL CRS
    ``http://www.opengis.net/def/crs/EPSG/0/<code>`` notation """
    code = int(epsg)
    return "http://www.opengis.net/def/crs/EPSG/0/%d" % code
import copy
def min_specializations(h, domains, x):
    """Return all minimal specializations of hypothesis ``h`` with respect
    to ``domains`` that are not fulfilled by example ``x``.

    ``domains`` is a list of lists where sub-list i holds the possible
    values of feature i. Wildcard features ("?") are specialized to every
    domain value except x's value; concrete features are specialized to "T".
    """
    specializations = []
    for i, feature in enumerate(h):
        if feature == "?":
            # Replace the wildcard with each domain value that x does not
            # have, so the specialization rejects x.
            candidates = copy.deepcopy(domains[i])
            candidates.remove(x[i])
            for value in candidates:
                specialized = list(h)
                specialized[i] = value
                specializations.append(tuple(specialized))
        else:
            # Concrete features are specialized to the impossible value "T".
            specialized = list(h)
            specialized[i] = "T"
            specializations.append(tuple(specialized))
    return specializations
import random
def random_cell(grid, snake):
    """
    Pick a uniformly random free cell on the grid.

    :param grid: The grid; only ``grid.rows`` is read (the grid is square).
    :param snake: The snake whose body segments (via ``.pos``) mark
        occupied cells.
    :returns: Position (x, y) of a free cell.
    """
    while True:
        x = random.randrange(grid.rows)
        y = random.randrange(grid.rows)
        # Rejection sampling: retry until the cell hits no body segment.
        if not any(segment.pos == (x, y) for segment in snake.body):
            return x, y
def point_in_polygon(point, polygon):
    """
    Ray-casting test: is ``point`` strictly inside the convex polygon?

    A horizontal ray is cast from the point; every crossing of a polygon
    edge toggles the inside/outside state.

    :param point: the [x, y] point to check
    :param polygon: ordered list of [x, y] vertices
    :return: True if point is inside polygon, False otherwise
    """
    px, py = point[0], point[1]
    inside = False
    vertex_count = len(polygon)
    # Matches the original's xints initialisation; a stale value can be
    # read when an edge is horizontal but not vertical.
    cross_x = 0.0
    ax, ay = polygon[0]
    for i in range(vertex_count + 1):
        bx, by = polygon[i % vertex_count]
        # The edge (a, b) can cross the ray only when py lies strictly
        # above the lower endpoint, at or below the upper endpoint, and
        # the point is not right of both endpoints.
        if min(ay, by) < py <= max(ay, by) and px <= max(ax, bx):
            if ay != by:
                # X coordinate where the edge crosses the horizontal ray.
                cross_x = (py - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or px <= cross_x:
                inside = not inside
        ax, ay = bx, by
    return inside
def tag(pages, tag):
    """Pages carrying the given tag; all pages when ``tag`` is falsy."""
    if not tag:
        return pages
    return [page for page in pages if tag in page.tags]
def get_speciesindices(specieslist):
    """
    Assign an arbitrary index to each species in the kinetic scheme.

    Parameters
    ----------
    specieslist : list
        a list of all the species in the model

    Returns
    -------
    speciesindices : dict
        maps each species to its index
    indices_to_species : dict
        the reverse mapping (index -> species)
    """
    speciesindices = {species: idx for idx, species in enumerate(specieslist)}
    indices_to_species = {idx: species for species, idx in speciesindices.items()}
    return speciesindices, indices_to_species
def dump_cookies(cookies_list):
    """Serialize cookie objects into a list of plain dicts
    (name/domain/value only).
    """
    return [
        {'name': cookie.name, 'domain': cookie.domain, 'value': cookie.value}
        for cookie in cookies_list
    ]
from typing import List
from typing import Dict
def divide_blocks(
        blocks: List[int],
        world_size: int) -> Dict[int, List[int]]:
    """
    Divide the blocks into world_size partitions, and return the divided
    block indexes for each rank (greedy longest-processing-time balancing
    on total block size).

    :param blocks: the blocks and each item is the given block size
    :param world_size: total world size
    :return: a dict, the key is the world rank, and the value the sorted
        block indexes
    :raises ValueError: if there are fewer blocks than ranks
    """
    if len(blocks) < world_size:
        # ValueError is more precise than bare Exception and is still
        # caught by callers handling Exception.
        raise ValueError("do not have enough blocks to divide")
    results: Dict[int, List[int]] = {rank: [] for rank in range(world_size)}
    load: Dict[int, int] = {rank: 0 for rank in range(world_size)}
    # Assign the largest blocks first; stable sort keeps equal-sized blocks
    # in index order, matching the original behavior.
    order = sorted(range(len(blocks)), key=lambda i: blocks[i], reverse=True)
    for index in order:
        # min() is O(world_size) per step; the original re-sorted the whole
        # load table each time. Ties resolve to the lowest rank in both.
        rank = min(load, key=load.get)
        results[rank].append(index)
        load[rank] += blocks[index]
    return {rank: sorted(indexes) for rank, indexes in results.items()}
def split_train_test(X, y, test_percentage):
    """
    Randomly split given dataset into training- and testing sets

    :param X: Design matrix to split
    :param y: Response vector to split
    :param test_percentage: Percentage of samples to use as test (0..1)
    :return: Two tuples of: (train set X, train set y), (test set X, test set y)
    """
    # Shuffle rows, then realign y to X's new row order.
    X = X.sample(frac=1)
    y = y.reindex_like(X)
    n = round(test_percentage * len(y))
    if n == 0:
        # X[:-0] would be an EMPTY slice, silently swapping the split;
        # with no test samples return everything as training data.
        return (X, y), (X[:0], y[:0])
    return (X[:-n], y[:-n]), (X[-n:], y[-n:])
def p1_f_linear(x):
    """DocTest module Expected Output Test - don't change or delete these lines
    >>> x = [565, 872, 711, 964, 340, 761, 2, 233, 562, 854]
    >>> print("The minimum is: ",p1_f_linear(x))
    The minimum is:  2
    """
    # ******ENTER YOUR FINAL CHECKED CODE AFTER THIS COMMENT BLOCK*******************
    # Track the smallest value seen so far, seeded with the first element;
    # the remaining elements are scanned once (linear time).
    smallest = x[0]
    for value in x[1:]:
        if value < smallest:
            smallest = value
    return smallest
def erroCsv(csvFile):
    """
    Rename the csv file with err notation

    :param csvFile: input csv file name
    :return: new file name
    """
    # Equivalent to str.replace: every '.csv' occurrence becomes '_err.csv'.
    return '_err.csv'.join(csvFile.split('.csv'))
from typing import List
def _generate_sharded_filenames(filename: str) -> List[str]:
"""Generates filenames of the each file in the sharded filepath.
Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py.
Args:
filename: The sharded filepath.
Returns:
A list of filepaths for each file in the shard.
"""
base, count = filename.split('@')
count = int(count)
return ['{}-{:05d}-of-{:05d}'.format(base, i, count) for i in range(count)] | 4686aee6dc4d1924dfb1745c5d8a3ae77a604a85 | 688,423 |
def drop_columns(tabular, n):
    """Return new tabular data with the first ``n`` items dropped from each row.

    >>> drop_columns([[1, 2, 3], [21, 22, 23], [31, 32, 33]], 1)
    [[2, 3], [22, 23], [32, 33]]
    """
    trimmed = []
    for row in tabular:
        trimmed.append(row[n:])
    return trimmed
def clicked_quality_reward(responses):
    """Accumulate quality and watch time over the clicked responses.

    Args:
        responses: A list of IEvResponse objects

    Returns:
        A two-element list ``[quality, watch_time]`` summed (as floats)
        over the responses that were clicked.
    """
    clicked = [response for response in responses if response.clicked]
    # Start at 0.0 so the result stays float even with no clicks.
    total_quality = sum((float(r.quality) for r in clicked), 0.0)
    total_watch = sum((float(r.watch_time) for r in clicked), 0.0)
    return [total_quality, total_watch]
def get_nim_sum(state: tuple[int, ...]) -> int:
    """
    XOR-fold the pile sizes to get the nim-sum of a position.
    See https://www.archimedes-lab.org/How_to_Solve/Win_at_Nim.html

    :param state: the state of the game
    :return: the nim sum of the current position
    """
    nim_sum = 0
    for pile in state:
        nim_sum = nim_sum ^ pile
    return nim_sum
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.