content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import random
import decimal
def randdecimal(precision, scale):
    """Generate a random decimal value with specified precision and scale.

    Parameters
    ----------
    precision : int
        The maximum number of digits to generate. Must be an integer between 1
        and 38 inclusive.
    scale : int
        The maximum number of digits following the decimal point. Must be an
        integer between 0 and ``precision`` inclusive.

    Returns
    -------
    decimal_value : decimal.Decimal
        A random decimal.Decimal object with the specified precision and scale.
    """
    assert 1 <= precision <= 38, 'precision must be between 1 and 38 inclusive'
    if scale < 0:
        raise ValueError(
            'randdecimal does not yet support generating decimals with '
            'negative scale'
        )
    # Guard: scale > precision would make 10 ** (precision - scale) a
    # fraction, and random.randint would fail with a confusing TypeError.
    if scale > precision:
        raise ValueError('scale must not exceed precision')
    max_whole_value = 10 ** (precision - scale) - 1
    whole = random.randint(-max_whole_value, max_whole_value)
    if not scale:
        return decimal.Decimal(whole)
    max_fractional_value = 10 ** scale - 1
    fractional = random.randint(0, max_fractional_value)
    # Zero-pad the fractional part so whole=1, fractional=5, scale=2
    # yields "1.05" rather than "1.5".
    return decimal.Decimal(
        '{}.{}'.format(whole, str(fractional).rjust(scale, '0'))
    )
def max_transmit_rule(mod, l, tmp):
    """
    **Constraint Name**: TxSimple_Max_Transmit_Constraint
    **Enforced Over**: TX_SIMPLE_OPR_TMPS
    Transmitted power cannot exceed the maximum transmission flow capacity in
    each operational timepoint.
    """
    transmit_power = mod.TxSimple_Transmit_Power_MW[l, tmp]
    # Capacity in the timepoint's period, derated by availability.
    available_capacity = (
        mod.Tx_Max_Capacity_MW[l, mod.period[tmp]]
        * mod.Tx_Availability_Derate[l, tmp]
    )
    return transmit_power <= available_capacity
def get_lam_list(self, is_int_to_ext=True):
    """Return the ordered list of laminations of the machine.

    Parameters
    ----------
    self : MachineSRM
        MachineSRM object
    is_int_to_ext : bool
        True to order the list from the inner lamination to the external one

    Returns
    -------
    lam_list : list
        Ordered lamination list
    """
    if self.rotor.is_internal:
        inner, outer = self.rotor, self.stator
    else:
        inner, outer = self.stator, self.rotor
    return [inner, outer] if is_int_to_ext else [outer, inner]
def column(matrix, i):
    """
    Gets column of matrix.
    INPUTS:
    Matrix, Int of column to look at
    RETURNS:
    Array of the column
    """
    extracted = []
    for row in matrix:
        extracted.append(row[i])
    return extracted
def _ensure_extension(filename: str, extension: str):
"""Add the extension if needed."""
if filename.endswith(extension):
return filename
return filename + extension | 11846b555c6663b9a0916f6a6981baf365fcdf9a | 685,176 |
import socket
def have_connectivity(host="8.8.8.8", port=53, timeout=3):
    """ Attempt to make a DNS connection to see if we're on the Internet.
    From https://stackoverflow.com/questions/3764291/checking-network-connection
    @param host: the host IP to connect to (default 8.8.8.8, google-public-dns-a.google.com)
    @param port: the port to connect to (default 53, TCP)
    @param timeout: seconds before timeout (default 3)
    @returns: True if connected; False otherwise.
    """
    try:
        # Use a per-socket timeout and close the socket deterministically;
        # the original leaked the socket and changed the process-wide
        # default timeout via socket.setdefaulttimeout().
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(timeout)
            sock.connect((host, port))
        return True
    except Exception:
        return False
def str2list(data):
    """
    Create a list of values from a whitespace and newline delimited text (keys are ignored).
    For example, this:
    ip 1.2.3.4
    ip 1.2.3.5
    ip 1.2.3.6
    becomes:
    ['1.2.3.4', '1.2.3.5', '1.2.3.6']
    """
    list_data = []
    for line in data.split('\n'):
        # split() with no argument tolerates tabs and runs of spaces,
        # which split(' ') mis-parsed as empty fields; lines without a
        # second field (blank or key-only) are skipped.
        fields = line.split()
        if len(fields) < 2:
            continue
        list_data.append(fields[1])
    return list_data
from datetime import datetime
def read_mne_raw_edf_header(edf_obj, **kwargs):
    """
    Header extraction function for RawEDF and Raw objects.
    Reads the number of channels, channel names and sample rate properties
    If existing, reads the date information as well.

    Args:
        edf_obj: mne RawEDF/Raw object; its ``info`` mapping is read and
            may be mutated (``meas_date`` is normalised in place).
        **kwargs: unused; accepted for a uniform reader interface.

    Returns:
        Header information as dict with keys 'n_channels', 'channel_names',
        'sample_rate' and 'date'.

    Raises:
        ValueError: if a required value is missing or cannot be converted.
    """
    # Each tuple below follows the format:
    # 1) output name, 2) edf_obj name, 3) function to apply to the read
    # value, 4) whether a missing value should raise an error.
    header_map = [("n_channels", "nchan", int, True),
                  ("channel_names", "ch_names", list, True),
                  ("sample_rate", "sfreq", float, True),
                  ("date", "meas_date", datetime.utcfromtimestamp, False)]
    # meas_date may arrive as a (timestamp, microseconds) pair — presumably
    # from older mne versions; collapse it to the bare timestamp so
    # utcfromtimestamp can handle it. Sub-second parts are not supported.
    if isinstance(edf_obj.info["meas_date"], (tuple, list)):
        assert edf_obj.info["meas_date"][1] == 0
        edf_obj.info["meas_date"] = edf_obj.info["meas_date"][0]
    header = {}
    for renamed, org, transform, raise_err in header_map:
        value = edf_obj.info.get(org)
        try:
            value = transform(value)
        except Exception as e:
            if raise_err:
                raise ValueError("Missing or invalid value in EDF for key {} "
                                 "- got {}".format(org, value)) from e
        # Note: on a non-raising failure the untransformed value (possibly
        # None) is stored as-is.
        header[renamed] = value
    return header
def generateRedirectURL(host, params):
    """
    Computes the redirect url in the specified format.
    :return: (string)
    """
    return f"{host}?{params}"
def formatter(msg, values):
    """Functional form of str.format: expand *values* as keyword args into *msg*."""
    return str.format(msg, **values)
from typing import Union
import base64
def read_file_as_b64(path: Union[str, bytes, int]) -> str:
    """Read a file's raw bytes and return them as a base64-encoded string.

    Args:
        path (Union[str, bytes, int]): binary file path
    Returns:
        str: file's bytes in base64-encoded string
    """
    with open(path, "rb") as contract_file:
        raw = contract_file.read()
    return base64.b64encode(raw).decode()
def isstr(s):
    """True if 's' is an instance of basestring in py2, or of str in py3"""
    # On py3 there is no basestring builtin, so the getattr falls back to str.
    return isinstance(s, getattr(__builtins__, 'basestring', str))
from typing import List
import copy
def insertion_sort(x: List) -> List:
    """Sort *x* with insertion sort and return a new sorted list.

    Each element is compared with the already-sorted prefix and shifted into
    its correct slot. Average and worst-case time complexity are O(n^2);
    best case (already sorted) is O(n). The sort is stable.

    >>> insertion_sort([4, 2, 3, 1, 0, 5])
    [0, 1, 2, 3, 4, 5]

    :param x: list to be sorted
    :return: new sorted list
    """
    result = copy.deepcopy(x)  # leave the caller's list untouched
    for pos in range(1, len(result)):
        current = result[pos]
        scan = pos - 1
        # Shift larger elements one slot to the right until the correct
        # insertion point for `current` is exposed.
        while scan >= 0 and result[scan] > current:
            result[scan + 1] = result[scan]
            scan -= 1
        result[scan + 1] = current
    return result
from typing import Union
import logging
def get_level(level: Union[str, int]) -> str:
    """Return the logging level as an upper-case string."""
    if not isinstance(level, str):
        return logging.getLevelName(level)
    return level.upper()
def set_cur_model(client, model, drawing=None):
    """Set the active model on a drawing.

    Args:
        client (obj):
            creopyson Client
        model (str):
            Model name.
        drawing (str, optional):
            Drawing name. Defaults: current active drawing.
    Returns:
        None
    """
    payload = {"model": model}
    if drawing:
        payload["drawing"] = drawing
    return client._creoson_post("drawing", "set_cur_model", payload)
def G(poisson, young):
    """
    Shear Modulus in MPa.
    """
    return young / (2 * (1 + poisson))
from typing import Union
def operacion_basica(a: float, b: float, multiplicar: bool) -> Union[float, str]:
    """Take two numbers (a, b) and a boolean (multiplicar):

    - If multiplicar is True: return the product of a and b.
    - If multiplicar is False: return the division of a by b.
    - If multiplicar is False and b is zero: return "Operación no válida".

    Constraints honoured by the implementation:
    - A single return statement.
    - No AND / OR operators.
    """
    if not multiplicar:
        if b == 0:
            resultado = "Operación no válida"
        else:
            resultado = a / b
    else:
        resultado = a * b
    return resultado
import math
def is_pentagonal(p: int) -> bool:
    """
    P = n * (3n - 1) / 2
    If P is pentagonal, the above equation has a positive integer solution
    for n.  Solving the quadratic gives n = (1 + sqrt(24P + 1)) / 6, so P is
    pentagonal iff 24P + 1 is a perfect square whose root r satisfies
    (1 + r) % 6 == 0.  math.isqrt keeps the check exact for arbitrarily
    large p, where float sqrt would lose precision.
    """
    if p < 1:
        return False
    discriminant = 24 * p + 1
    root = math.isqrt(discriminant)  # exact integer square root
    return root * root == discriminant and (1 + root) % 6 == 0
def int2uint32(value):
    """
    Convert a signed 32 bits integer into an unsigned 32 bits integer.
    >>> print(int2uint32(1))
    1
    >>> print(int2uint32(2**32 + 1)) # ignore bits larger than 32 bits
    1
    >>> print(int2uint32(-1))
    4294967295
    """
    # Reducing modulo 2**32 keeps exactly the low 32 bits.
    return value % 0x100000000
def to_camel_case(snake_case_string):
    """
    Convert a string from snake case to camel case.
    :param snake_case_string: Snake-cased string to convert to camel case.
    :returns: Camel-cased version of snake_case_string.
    """
    first, *rest = snake_case_string.lstrip('_').split('_')
    return first + ''.join(word.title() for word in rest)
def sstate_get_manifest_filename(task, d):
    """
    Return the sstate manifest file path for a particular task.
    Also returns the datastore that can be used to query related variables.

    Args:
        task: task name without the "do_" prefix (e.g. "populate_sysroot").
        d: bitbake datastore; it is copied, never mutated.

    Returns:
        tuple: (expanded manifest file path, the copied datastore)
    """
    # Work on a copy so the caller's datastore is left untouched.
    d2 = d.createCopy()
    # 'stamp-extra-info' customises SSTATE_MANMACH, which the
    # SSTATE_MANFILEPREFIX expansion presumably references — confirm
    # against the sstate class definitions.
    extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
    return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
def get_nodes(edges):
    """
    Get nodes from list of edges.
    Parameters
    ----------
    edges : list
        List of tuples (source page, end page).
    Returns
    -------
    list
        nodes, not unique
    """
    return [node for edge in edges for node in edge]
def t2n(torch_tensor):
    """
    Convert torch.tensor to np.ndarray.
    """
    # Detach from the graph, move to host memory, then view as numpy.
    return torch_tensor.detach().cpu().numpy()
import torch
def build_vertices(corners1: torch.Tensor, corners2: torch.Tensor,
                   c1_in_2: torch.Tensor, c2_in_1: torch.Tensor,
                   inters: torch.Tensor, mask_inter: torch.Tensor):
    """find vertices of intersection area

    Args:
        corners1 (torch.Tensor): (B, N, 4, 2)
        corners2 (torch.Tensor): (B, N, 4, 2)
        c1_in_2 (torch.Tensor): Bool, (B, N, 4)
        c2_in_1 (torch.Tensor): Bool, (B, N, 4)
        inters (torch.Tensor): (B, N, 4, 4, 2)
        mask_inter (torch.Tensor): (B, N, 4, 4)
    Returns:
        vertices (torch.Tensor): (B, N, 24, 2) vertices of intersection area. only some elements are valid
        mask (torch.Tensor): (B, N, 24) indicates valid elements in vertices
    """
    # NOTE: inter has elements equal to zero with zero gradient (masked by
    # multiplying with 0); can be used as a trick.
    batch, num = corners1.size()[0], corners1.size()[1]
    flat_inters = inters.view([batch, num, -1, 2])
    flat_mask = mask_inter.view([batch, num, -1])
    vertices = torch.cat([corners1, corners2, flat_inters], dim=2)  # (B, N, 4+4+16, 2)
    mask = torch.cat([c1_in_2, c2_in_1, flat_mask], dim=2)          # Bool (B, N, 4+4+16)
    return vertices, mask
def _get_ips(ips_as_string):
"""Returns viable v4 and v6 IPs from a space separated string."""
ips = ips_as_string.split(" ")[1:] # skip the header
ips_v4, ips_v6 = [], []
# There is no guarantee if all the IPs are valid and sorted by type.
for ip in ips:
if not ip:
continue
if "." in ip and ":" not in ip:
ips_v4.append(ip)
else:
ips_v6.append(ip)
return ips_v4, ips_v6 | 7f1c88f3006faab44680af39c9e87661a35e4462 | 685,220 |
def rmFromList(lst, thing=''):
    """Removes all values matching thing from a list (a new list is returned)."""
    return [item for item in lst if item != thing]
import json
import six
import base64
def _base64_encode(dictionary):
"""Returns base64(json(dictionary)).
:param dictionary: dict to encode
:type dictionary: dict
:returns: base64 encoding
:rtype: str
"""
json_str = json.dumps(dictionary, sort_keys=True)
str_bytes = six.b(json_str)
return base64.b64encode(str_bytes).decode('utf-8') | dbd5e5bcfecb365ba0009f6e4e3f9ddaa8148bf1 | 685,231 |
def to_bytes(string):
    """Convert string to bytes type; anything that is not str passes through."""
    if not isinstance(string, str):
        return string
    return string.encode('utf-8')
def check_in(position, info_pos_line_pairs):
    """
    Check if position corresponds to the starting position of a pair in info_pos_line_pairs
    (return pair if found, empty list otherwise)
    :param position: list
    :param info_pos_line_pairs: list of lists
    :return: list
    """
    # pair form: [info, [in_line_position_1, line_number_1], [in_line_position_2, line_number_2]]
    for pair in info_pos_line_pairs:
        start = pair[1]
        if start[0] == position[0] and start[1] == position[1]:
            return pair
    return []
import re
def substitute_and_count(this, that, var, replace=True, count=0):
    """Perform a re substitute, but also count the # of matches"""
    substituted, matches = re.subn(this, that, var, count=count)
    if replace:
        return (substituted, matches)
    # When not replacing, return the untouched input plus the would-be result.
    return (var, substituted)
def is_valid(x, y, grid, x_max, y_max):
    """Check the bounds and free space in the map"""
    in_bounds = 0 <= x < x_max and 0 <= y < y_max
    return in_bounds and grid[x][y] == 0
def decode(data):
    """
    Normalize a "compressed" dictionary with special 'map' entry.
    This format looks like a way to reduce bandwidth by avoiding repeated
    key strings. Maybe it's a JSON standard with a built-in method to
    decode it? But since I'm REST illiterate, we decode it manually!
    For example, the following data object:
    data = {
    "244526" : [
    "Starter Songs",
    [
    134082068,
    134082066,
    134082069,
    134082067
    ],
    "1234-1234-1234-1234",
    false,
    null,
    null,
    null,
    null,
    1
    ],
    "map" : {
    "artwork_id" : 7,
    "description" : 6,
    "name" : 0,
    "public_id" : 4,
    "sort" : 8,
    "system_created" : 3,
    "tracks" : 1,
    "type" : 5,
    "uid" : 2
    }
    }
    will be decoded to:
    data = {
    "244526" : {
    "name": "Starter Songs",
    "tracks": [
    134082068,
    134082066,
    134082069,
    134082067
    ],
    "uid": "1234-1234-1234-1234",
    "system_created": false,
    "public_id": null,
    "type": null,
    "description": null,
    "artwork_id": null,
    "sort": 1
    }
    }
    """
    # A payload without a valid 'map' dict needs no decoding: pass through.
    if not 'map' in data or type(data['map']) is not dict:
        return data
    # Invert the map: positional index -> field name.
    keymap = {v: k for (k, v) in data['map'].items()}
    result = {}
    for k, v in data.items():
        # Only list-valued entries are positional records to be expanded.
        if type(v) is list:
            result[k] = {keymap[i]: v[i] for i in range(len(v))}
    # NOTE(review): non-list entries (including 'map' itself) are dropped
    # from the result — presumably intentional, but worth confirming.
    return result
import torch
def trainGenerator(netG, netD, optimiserG, inputs, targets, loss_func, device, lambdapar):
    """
    This function trains the generator
    :param netG: The generator network
    :param netD: The discriminator network
    :param optimiserG: The optimiser for the generator
    :param inputs: The low resolution input signal for training
    :param targets: The high resolution target signal for training
    :param loss_func: The non-adversarial loss function used
    :param device: The device the model is training on
    :param lambdapar: The hyperparameter altering the proportion of the loss that is adversarial
    :return: The generator loss
    """
    optimiserG.zero_grad()
    # Generate high-resolution candidates from the low-resolution input.
    fake_waves = netG(inputs.clone()).to(device)
    # Critic score of the generated signal (adversarial term).
    fake_validity = netD(fake_waves.clone())
    # Content loss against the true high-resolution targets.
    normal_loss = loss_func(fake_waves.clone(), targets.clone())
    # Subtracting the mean critic score pushes the generator to raise it
    # (WGAN-style objective), weighted by lambdapar.
    g_loss = normal_loss - lambdapar * torch.mean(fake_validity)
    g_loss.backward()
    optimiserG.step()
    return g_loss
from typing import Tuple
from typing import List
def get_moves(
    group_position: Tuple[int, int], size: int, neighbors: List[Tuple[int, int]]
):
    """
    :param group_position:
    :param size:
    :param neighbors:
    :return: A list of move tuples from group_position to each neighbor
    """
    moves = []
    for destination in neighbors:
        moves.append((*group_position, size, *destination))
    return moves
def exc_msg_str(exception, default="") -> str:
    """
    Extract the exception's message, or its str representation, or the default message, in order of
    priority.
    """
    try:
        candidate = exception.args[0]
    except (AttributeError, IndexError):
        candidate = None
    # Fall back to the str() form when args[0] is absent, empty or not text.
    if not candidate or not isinstance(candidate, str):
        candidate = str(exception).strip()
    return candidate if candidate else default
def make_album(artist, title):
    """Build a dictionary containing information about an album."""
    return {'artist': artist.title(), 'title': title.title()}
def average_list(l1, l2):
    """Return the element-wise average of two lists"""
    averaged = []
    for first, second in zip(l1, l2):
        averaged.append((first + second) / 2)
    return averaged
def rssf(rr, index, scale=10):
    """Read and scale single to float."""
    raw = rr.registers[index]
    return float(raw) / scale
def distance(point1, point2):
    """
    Returns the Euclidean distance of two points in the Cartesian Plane.
    >>> distance([3,4],[0,0])
    5.0
    >>> distance([3,6],[10,6])
    7.0
    """
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def factorial(n: int) -> int:
    """Iteratively compute the factorial of ``n``.

    Args:
        n (int): value whose factorial is computed

    Returns:
        int: the computed factorial (1 when ``n`` <= 1)
    """
    product = 1
    for k in range(2, n + 1):
        product *= k
    return product
import requests
import logging
def validate_server(url):
    """
    Validates if a konduit.Server is running under the specified url. Returns True if
    the server is running, False otherwise.

    :param url: host and port of the server as str
    :return: boolean
    """
    try:
        r = requests.get("{}/healthcheck".format(url))
        if r.status_code != 204:
            logging.error(
                "The server health checks failed. Please verify that the server is running without any "
                "issues..."
            )
            # Explicit failure: the original fell through here and
            # implicitly returned None instead of False.
            return False
        return True
    except Exception as ex:
        logging.error(
            "{}\nUnable to connect to the server or the server health checks have failed. Please "
            "verify that the server is running without any issues...".format(str(ex))
        )
        return False
from typing import List
def print_bitstring(width: int, bitset: List[int]) -> str:
    """Format and print a bitset of a certain width with padding."""
    return "".join(bin(value)[2:].zfill(8)[:width] for value in bitset)
import contextlib
import socket
def tcp_port_connectable(hostname, port):
    """
    Return true if we can connect to a TCP port
    """
    try:
        sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sk.settimeout(5)
            sk.connect((hostname, port))
            return True
        finally:
            sk.close()
    except socket.error:
        return False
def maxdt(area, shape, maxvel):
    """
    Calculate the maximum time step that can be used in the simulation.
    Uses the result of the Von Neumann type analysis of Di Bartolo et al.
    (2012).
    Parameters:
    * area : [xmin, xmax, zmin, zmax]
        The x, z limits of the simulation area, e.g., the shallowest point is
        at zmin, the deepest at zmax.
    * shape : (nz, nx)
        The number of nodes in the finite difference grid
    * maxvel : float
        The maximum velocity in the medium
    Returns:
    * maxdt : float
        The maximum time step
    """
    xmin, xmax, zmin, zmax = area
    nz, nx = shape
    dx = (xmax - xmin) / (nx - 1)
    dz = (zmax - zmin) / (nz - 1)
    # Stability bound scales with the smallest grid spacing.
    return 0.606 * min(dx, dz) / maxvel
def get_simulation_attributes(cube):
    """Get model, experiment and mip information."""
    attrs = cube.attributes
    model = attrs['model_id']
    experiment = attrs['experiment_id']
    mip = 'r%si1p%s' % (attrs['realization'], attrs['physics_version'])
    # historicalMisc runs are relabelled as the anthropogenic-aerosol set.
    if experiment == 'historicalMisc':
        experiment = 'historicalAA'
    return model, experiment, mip
def _create_python_packages_string(python_packages: list):
"""
Builds the specific string required by Zeppelin images to install Python packages.
:param python_packages: list containing Python package strings
:return: the properly formatted Python packages string
"""
if len(python_packages) == 0:
return None
else:
return " ".join(python_packages) | 99c674f2a31936732fd5f2051cc61258cd267c96 | 685,279 |
import math
def window(outsideLux):
    """Returns the effect of windows on lux."""
    if outsideLux <= 1:
        return outsideLux / 10.0
    # Attenuation factor shrinks with the log of brightness, capped at 100.
    percent = min(1 / math.log(outsideLux), 100.0)
    return outsideLux * percent
def count(input_, condition=lambda x: True):
    """Count the number of items in an iterable for a given condition
    For example:
    >>>count("abc")
    3
    >>>count("abc", condition=lambda x: x=="a")
    1
    """
    total = 0
    for item in input_:
        total += condition(item)
    return total
from typing import OrderedDict
def format_budgets(budgets, allow_whitespace=False):
    """
    Format budget-strings so that they are as short as possible while still distinguishable

    Parameters
    ----------
    budgets: List[str]
        list with budgets
    allow_whitespace: bool
        if set to True, will return "budget 10.5" else "budget_10.5"

    Returns
    -------
    formatted_budgets: Dict[float] -> str
        mapping from budget to its formatted label
    """
    if not budgets:
        return {None: None}

    def render(budget, digits):
        if float(budget).is_integer():
            return 'budget_{}'.format(int(budget))
        return 'budget_{:.{}f}'.format(budget, digits)

    digits = 1
    formatted_budgets = OrderedDict((b, render(b, digits)) for b in budgets)
    # Increase precision until every budget maps to a distinct label.
    while len(set(formatted_budgets.values())) != len(formatted_budgets.values()):
        digits += 1
        formatted_budgets = OrderedDict((b, render(b, digits)) for b in budgets)
    if allow_whitespace:
        formatted_budgets = OrderedDict(
            (b, str(label).replace("_", " ")) for b, label in formatted_budgets.items()
        )
    return formatted_budgets
import click
def confirm(text):
    # type: (str) -> bool
    """Confirm yes/no.

    Thin wrapper around click.confirm so callers don't import click
    directly; prompts the user with *text* and returns their answer.
    """
    return click.confirm(text)
def pair_reads(r1, r2):
    """
    Given bam entries for two ends of the same read (R1/R2) that have
    been aligned as if they were single-end reads, set the fields in the
    entries so that they are now properly paired.
    Args:
        r1, r2 (pysam.AlignedSegment): two ends of the same read, both
            aligned and having the same read name
    Returns:
        r1, r2 (pysam.AlignedSegment): the same two reads that were
            provided as arguments, but with the FLAG, RNEXT, PNEXT, and
            TLEN fields modified to make them a proper pair
    """
    # if the two ends map to the same reference, we need to set RNEXT
    # to '=' for both reads and also calculate the TLEN
    if r1.reference_name == r2.reference_name:
        # NOTE(review): pysam normally stores the literal mate reference
        # name; confirm that assigning "=" round-trips as intended here.
        r1.next_reference_name = "="
        r2.next_reference_name = "="
        # TLEN spans from the leftmost start to the rightmost aligned base;
        # the leftmost read gets the positive value.
        if r1.reference_start < r2.reference_start:
            tlen = (
                r2.reference_start
                + max(r2.get_reference_positions())
                - r1.reference_start
            )
            r1.template_length = tlen
            r2.template_length = -1 * tlen
        else:
            tlen = (
                r1.reference_start
                + max(r1.get_reference_positions())
                - r2.reference_start
            )
            r1.template_length = -1 * tlen
            r2.template_length = tlen
    else:  # ends map to different references, so just set RNEXT
        r1.next_reference_name = r2.reference_name
        r2.next_reference_name = r1.reference_name
    # set PNEXT
    r1.next_reference_start = r2.reference_start
    r2.next_reference_start = r1.reference_start
    # set some bits in the FLAG
    r1.is_paired, r2.is_paired = True, True
    r1.is_proper_pair, r2.is_proper_pair = True, True
    r1.mate_is_unmapped, r2.mate_is_unmapped = False, False
    # each read records the strand of its mate
    r1.mate_is_reverse = r2.is_reverse
    r2.mate_is_reverse = r1.is_reverse
    r1.is_read1, r2.is_read1 = True, False
    r1.is_read2, r2.is_read2 = False, True
    # force both ends to be primary alignments
    r1.is_secondary, r2.is_secondary = False, False
    r1.is_supplementary, r2.is_supplementary = False, False
    return r1, r2
def is_valid_port(port):
    """Checks a port number to check if it is within the valid range

    Args:
        port - int, port number to check
    Returns:
        bool, True if the port is within the valid range or False if not
    """
    # Non-ints are rejected outright; both endpoints (1024, 65535) are excluded.
    return isinstance(port, int) and 1024 < port < 65535
def mask_landsat8_sr(image):
    """Mask clouds and cloud shadows in Landsat surface reflectance image.
    Obtained from:
    https://developers.google.com/earth-engine/datasets/catalog/LANDSAT_LC08_C01_T2_SR
    Parameters
    ----------
    image : ee.Image
        Input image (Landsat 8 Surface Reflectance).
    Returns
    -------
    N/A : ee.Image
        Input image masked for clouds and cloud shadows.
    Example
    -------
    >>> import ee
    >>> peak_green = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_008057_20170602'
    >>> peak_green_mask = mask_landsat8_sr(peak_green)
    """
    # Define cloud (5) and cloud shadow (3) mask bits
    cloud_shadow_bit_mask = (1 << 3)
    cloud_bit_mask = (1 << 5)
    # Get the pixel QA band
    qa = image.select('pixel_qa')
    # Combine the two tests with ee.Image.And: the Python `and` operator
    # just returned the second operand, silently dropping the
    # cloud-shadow condition from the mask.
    mask = qa.bitwiseAnd(cloud_shadow_bit_mask).eq(0).And(
        qa.bitwiseAnd(cloud_bit_mask).eq(0))
    # Return masked image
    return image.updateMask(mask)
import re
def safe_name(name):
    """Convert an arbitrary string to a standard distribution name
    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
    """
    pattern = re.compile('[^A-Za-z0-9.]+')
    return pattern.sub('-', name)
def deepget(obj, keys):
    """
    Deepget is a small function enabling the user to "cherrypick" specific
    values from deeply nested dicts or lists. This is useful, if the just one
    specific value is needed, which is hidden in multiple hierarchies.
    :Example:
    >>> import diggrtoolbox as dt
    >>> ENTRY = {'data' : {'raw': {'key1': 'value1',
                                   'key2': 'value2'}}}
    >>> KEY2 = ['data', 'raw', 'key2']
    >>> dt.deepget(ENTRY, KEY2) == 'value2'
    True
    """
    def _descend(node, remaining):
        key = remaining.pop(0)
        if remaining:
            return _descend(node[key], remaining)
        return node[key]
    # Work on a copy so the caller's key list is never mutated.
    return _descend(obj, list(keys))
import shutil
def is_travis_installed() -> bool:
    """Return a boolean representing if travis gem is installed."""
    found = shutil.which("travis")
    return found is not None
def get_rlz_list(all_rlz_records):
    """ Returns list of unique reverse lookup zone files """
    # A set comprehension removes duplicates before the list conversion.
    return list({record["zone_file"] for record in all_rlz_records})
def image_scale(img):
    """Retrieves the image cell size (e.g., spatial resolution)
    Args:
        img (object): ee.Image
    Returns:
        float: The nominal scale in meters.
    """
    projection = img.projection()
    return projection.nominalScale().getInfo()
def has_c19_scope(scopes):
    """ Check if the COVID-19 GLIDE number or HRP code is present """
    for scope in scopes:
        code = scope.code.upper()
        # GLIDE number (type 1, vocabulary 1-2)
        if scope.type == "1" and scope.vocabulary == "1-2" and code == "EP-2020-000012-001":
            return True
        # HRP code (type 2, vocabulary 2-1)
        if scope.type == "2" and scope.vocabulary == "2-1" and code == "HCOVD20":
            return True
    return False
def del_fake_nums(intList, step):  # 8
    """
    Delete fake numbers added by the fake_nums function (only used in decryption)
    """
    # Fake values sit at every (step+1)-th index starting from index 0.
    return [value for index, value in enumerate(intList) if index % (step + 1) != 0]
def preço_final(preço, **kwargs):
    """
    Final price
    -----------
    Compute the final price of a product.

    args
    ----
    preço : float
        Initial price of the product

    **kwargs
    --------
    imposto : float
        Tax applied on the price (%)
    desconto : float
        Discount applied on the price (%)

    return
    ------
    float
        Final value of the product's price
    """
    imposto = kwargs.get('imposto')
    desconto = kwargs.get('desconto')
    # Apply the tax first, then the discount, skipping absent/zero values.
    if imposto:
        preço = preço + preço * (imposto / 100)
    if desconto:
        preço = preço - preço * (desconto / 100)
    return preço
def convert_coordinates(value: str) -> float:
    """Convert coordinates to lat/long."""
    # Short strings take one leading digit, longer ones take two.
    split_at = 1 if len(value) < 8 else 2
    return float(value[:split_at] + "." + value[split_at:])
def magnitude(vector):
    """ get magnitude (length) of vector """
    x, y, z = vector[0], vector[1], vector[2]
    return (x ** 2 + y ** 2 + z ** 2) ** .5
def add_user(db, meeting_id, user_email, responded=False, busy_times=None):
    """
    @brief adds a user to a specific collection in our mongo database
    @param db the "meetme" database (collection-like object with insert())
    @param meeting_id a unique meeting_id, randomly generated and shared across multiple users
    attending the same meeting
    @param user_email (str) the users email address
    @param responded (boolean) "True" if the user has responded and we have their busy times, "False" otherwise
    @param busy_times (list of TimeBlocks) busy times from the individual's calendar;
    None/empty if the user has not yet responded
    @return None, the result is that a new user has been added to a collection in our database
    """
    # A fresh list per call avoids the shared mutable-default pitfall of
    # the original `busy_times=[]` signature.
    if busy_times is None:
        busy_times = []
    user = {
        "user_meeting_id": meeting_id,
        "user_email": user_email,
        "user_responded": responded,
        "user_busy_times": busy_times,
    }
    db.insert(user)
    return None
def is_complete(board: list[list[int]]) -> bool:
    """
    Check whether the board (matrix) is completely filled with non-zero values.
    >>> is_complete([[1]])
    True
    >>> is_complete([[1, 2], [3, 0]])
    False
    """
    return all(cell != 0 for row in board for cell in row)
import sympy
def airystressint(XA, XB, YA, YB, P, a, b, E, nu):
    """
    Calculate integrals of strain energy values based on an Airy stress function

    Args:
        XA, XB: x-integration limits.
        YA, YB: y-integration limits.
        P: applied load.
        a, b: section dimensions — presumably half-depth and width; confirm.
        E: Young's modulus.
        nu: Poisson's ratio.

    Returns:
        Numeric value of the strain-energy integral over [XA, XB] x [YA, YB].
    """
    x1, x2, xa, xb, ya, yb = sympy.symbols("x_1, x_2 x_a x_b y_a y_b")
    # Stress components (sigma_11, sigma_22, sigma_12) from the Airy
    # solution — presumably the end-loaded cantilever problem; verify source.
    sigma = sympy.Matrix([
        [3 * P / (2 * a**3 * b) * x1 * x2],
        [0],
        [3 * P / (4 * a * b) * (1 - (x2**2) / (a**2))]
    ])
    C = 3 * P / (4 * E * a**3 * b)
    # Corresponding strain components (note the nu coupling terms).
    epsilon = sympy.Matrix([
        [2 * C * x1 * x2],
        [-2 * nu * C * x1 * x2],
        [2 * (1 + nu) * C * (a**2 - x2**2)]
    ])
    # Strain-energy density sigma·epsilon, integrated symbolically over the
    # rectangle, then evaluated numerically at the requested limits.
    SE = sigma.dot(epsilon)
    SEi = sympy.lambdify((xa, xb, ya, yb), SE.integrate((x1, xa, xb), (x2, ya, yb)))
    return SEi(XA, XB, YA, YB)
def dggs_cell_overlap(cell_one: str, cell_two: str):
    """
    Determines whether two DGGS cells overlap.
    Where cells are of different resolution, they will have different suid lengths. zip truncates
    the longer to the length of the shorter; the cells overlap iff the common prefix matches.
    :param cell_one: the first DGGS cell
    :param cell_two: the second DGGS cell
    :return: True if overlaps
    """
    return all(a == b for a, b in zip(cell_one, cell_two))
def groupms_byiconf(microstates, iconfs):
    """
    Split microstates into two groups based on conformer membership.

    The first returned list holds microstates whose state contains at least
    one of the given conformer indices; the second holds those containing
    none of them.
    """
    ingroup = []
    outgroup = []
    for ms in microstates:
        target = ingroup if any(ic in ms.state for ic in iconfs) else outgroup
        target.append(ms)
    return ingroup, outgroup
def get_bucket_and_key(s3_path):
    """Split an S3 path into its bucket name and object key.

    Args:
        s3_path (str): Input S3 path, e.g. ``s3://bucket/some/key``.

    Returns:
        tuple: ``(bucket, key)``.
    """
    # Strip the scheme prefix case-insensitively and only at the start of
    # the string. The previous str.replace() approach removed 's3://' from
    # anywhere in the path, which would corrupt a key that happened to
    # contain that substring.
    if s3_path[:5].lower() == 's3://':
        s3_path = s3_path[5:]
    bucket, key = s3_path.split('/', 1)
    return bucket, key
def boiler_mode(raw_table, base_index):
    """Convert a raw boiler mode value to an English label.

    :param raw_table: sequence of raw register values
    :param base_index: index of the boiler-mode value within raw_table
    :return: "Summer", "Winter" or "Unknown"
    """
    # Dispatch table instead of an if-chain; also fixes the original
    # "Winder" typo for mode 5.
    modes = {4: "Summer", 5: "Winter"}
    return modes.get(raw_table[base_index], "Unknown")
def get_longitude_ref_multiplier(imgMetadata):
    """
    Return the longitude sign multiplier for the given EXIF metadata.

    Reads the Exif.GPSInfo.GPSLongitudeRef tag from the imgMetadata dict:
    west ('w'/'W') yields -1, anything else yields 1.
    """
    ref = imgMetadata['Exif.GPSInfo.GPSLongitudeRef'].value.lower()
    return -1 if ref == 'w' else 1
def args_to_string(args):
    """
    Render the experiment's arguments as a single underscore-joined string.

    :param args: object exposing the experiment attributes listed below
    :return: string of the form "name_value_name_value_..."
    """
    shown = ["experiment", "network_name", "fit_by_epoch", "bz_train",
             "lr", "decay", "local_steps"]
    return "_".join(f"{name}_{getattr(args, name)}" for name in shown)
def esc_format(text):
    """Return text with formatting characters escaped.

    Markdown requires a backslash before literal special characters so they
    are not interpreted as formatting (bold, italics, links, ...).
    """
    specials = set('_*[]()~`>#+-=|{}.!')
    # Escape character-by-character; equivalent to chained str.replace calls
    # because the inserted backslash is never itself a special character.
    return ''.join('\\' + ch if ch in specials else ch for ch in str(text))
import json
def load_json(filename):
    """Load a JSON-lines file and return a dict of its documents.

    Each line of the file must be a JSON object with an "_id" field; the
    returned dict maps that id to the parsed document.

    :param filename: path to the JSON-lines file
    :return: dict mapping document "_id" to document
    """
    docs = {}
    with open(filename, encoding="utf-8") as fh:
        # Iterate the file directly instead of readlines(): same result,
        # without materialising the whole file in memory first.
        for line in fh:
            doc = json.loads(line)
            docs[doc["_id"]] = doc
    return docs
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    :param filename: path to the index file
    :return: list of integers, in file order
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on the garbage collector to close it.
    with open(filename) as handle:
        return [int(line.strip()) for line in handle]
def text_tostring(t, default=None, emphasis='emphasis', strong='strong', sup='sup',
                  fn_anc='fn_anc', fn_sym='fn_sym'):
    """Convert a Text object to str.

    Wraps each styled fragment in marker characters and collapses runs of
    whitespace into single spaces.

    Example:
        >>> t = [('Unmarked text. ', None), ('Text marked "emphasis".', 'emphasis'),
        ...      (' ', None), ('R', 'sup'), ('not marked', None), ('R', 'sup'),
        ...      ('.', None)]
        >>> text_tostring(t)
        'Unmarked text. *Text marked "emphasis".* <R>not marked<R>.'
    """
    wrappers = {
        default: '{}',
        emphasis: '*{}*',
        strong: '**{}**',
        sup: '<{}>',
        fn_anc: '[^{}]',
        fn_sym: '[^{}]:',
    }
    pieces = [wrappers[style].format(chunk) for chunk, style in t]
    # Normalise all whitespace runs to single spaces.
    return ' '.join(''.join(pieces).split())
import dill
def block_worker(payload):
    """
    Worker function used to compute pairwise distance/similarity over a whole
    block, returning the list of matching pairs.
    """
    similarity, block, serialized, graph = payload
    if serialized:
        # The similarity callable was shipped pickled (dill) to the worker.
        similarity = dill.loads(similarity)
    pairs = []
    for offset, first in enumerate(block):
        for second in block[offset + 1:]:
            # Skip pairs already recorded in the graph, if one was given.
            if graph is not None and first in graph and second in graph[first]:
                continue
            if similarity(first, second):
                pairs.append((first, second))
    return pairs
import glob
def setup_filepaths(data_path, participant_numbers):
    """Set up filepaths for reading in participant .h5 files.

    Args:
        data_path (str): path to directory containing .h5 files
        participant_numbers (list): participant numbers for filepaths

    Returns:
        list: filepaths to all of the .h5 files
    """
    return [
        path
        for number in participant_numbers
        for path in glob.glob(data_path + "/" + number + "_*.h5")
    ]
def get_dividing_point(y: list):
    """
    Find the index where the labels first change value.

    Args:
        y (list): data labels

    Returns:
        int: -1 if all labels are identical, otherwise the index of the
        first label that differs from the first one.
    """
    # Until the first mismatch every element equals y[0], so comparing each
    # label against the first is equivalent to tracking the previous one.
    first = y[0]
    for index, label in enumerate(y):
        if label != first:
            return index
    return -1
def parse_imr_line(line):
    """
    Parse a line of the IMR csv dataset into tuples.

    :param line: semicolon-separated record: ``id;label1;label2;feature;...``
    :return: ``((label1 (int), label2 (int)), features (list of float))``,
        or None when the first label field is not a non-negative integer.
    """
    fields = line.split(";")
    if not fields[1].isdigit():
        return None
    label1 = int(fields[1])
    label2 = int(fields[2])
    # Materialise the features so callers get the documented list of floats;
    # the original returned a single-use map() iterator in Python 3.
    features = [float(value) for value in fields[3:]]
    return ((label1, label2), features)
def _log_level_from_verbosity(verbosity):
"""Get log level from verbosity count."""
if verbosity == 0:
return 40
elif verbosity == 1:
return 20
elif verbosity >= 2:
return 10 | 6b60fde72e6c819827f137e5fa659fdfdcbfbed7 | 685,364 |
def intersects(region1, region2):
    """
    Check if two regions intersect.

    Regions that merely share an endpoint do not intersect, except when one
    of them has zero length, in which case they do.

    Arguments:
    region1 -- a first region.
    region2 -- a second region.

    Returns True if regions intersect and False otherwise.
    """
    # A zero-length region intersects when it lies inside the other region
    # (endpoints included).
    if region1.a == region1.b:
        return region2.a <= region1.a <= region2.b
    if region2.a == region2.b:
        return region1.a <= region2.a <= region1.b
    # Proper regions overlap iff each one starts before the other ends.
    return region1.a < region2.b and region2.a < region1.b
import functools
import operator
def get_confidence(model, tag_per_token, class_probs):
    """
    Get the confidence of a given model in a token list, using the class
    probabilities associated with this prediction.
    """
    label_indices = [
        model._model.vocab.get_token_index(tag, namespace = "labels")
        for tag in tag_per_token
    ]
    # Probability the model assigned to the chosen label at each position.
    per_token = [probs[idx] for idx, probs in zip(label_indices, class_probs)]
    # Sequence confidence = product of the per-token probabilities.
    return functools.reduce(operator.mul, per_token)
def gap_line(x, x0, y0, m):
    """
    Return the y-value of the gap at the provided x values.

    Evaluates the line through (x0, y0) with slope m:
    y = (x - x0) * m + y0
    """
    rise = (x - x0) * m
    return rise + y0
def normalise_sequence(input_sequence):
    """
    Normalise a list or tuple to produce a tuple with values representing the
    proportion of each to the total of the input sequence.
    """
    # Compute the total once: the original generator re-evaluated
    # sum(input_sequence) for every element (O(n^2)) and, being lazy,
    # returned a generator despite the docstring's promise of a tuple.
    total = sum(input_sequence)
    return tuple(value / total for value in input_sequence)
def replace_all(text, dic):
    """
    Replace every occurrence of each key of ``dic`` in ``text`` with the
    corresponding value.

    :param text: string to transform
    :param dic: mapping of substring -> replacement
    :return: the transformed string
    """
    # Iterate the items view directly; wrapping it in list() was
    # unnecessary work.
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
def create_document1(args):
    """Create document 1 -- an HTML document.

    Renders a DocuMap summary page from ``args`` (keys used: env_subject,
    env_description, env_version). The ``**name**/`` placeholders styled
    ``color:white`` appear to be invisible anchor strings for a signing
    service to place tabs (signature, attachment, radio buttons) over --
    TODO confirm against the enveloping code.
    """
    return f"""
    <!DOCTYPE html>
    <html>
        <head>
          <meta charset="UTF-8">
        </head>
        <body style="font-family:sans-serif;margin-left:2em;">
        <table>
            <tbody style="border-collapse: collapse;border: 10px solid #d9d7ce; display: inline-block;padding: 6px 12px;line-height:50px;text-align: left;vertical-align: bottom;">
            <tr>
                <td colspan="2"><h1 style='text-align: left; color: black;'><u>DocuMap</u></h1></td>
            </tr>
            <tr>
                <td colspan="2"><h2 style='text-align: left; color: black;'>{args["env_subject"]}</h2></td>
            </tr>
            <tr>
                <td colspan="2" style="text-align: justify;"><pre><h3>{args["env_description"]}</h3></pre></td>
            </tr>
            <tr>
                <td><b>Map URL: </b></td>
                <td><h3 style="color:white;">**map_url**/</h3></td>
            </tr>
            <tr>
                <td><b>Version: </b></td>
                <td><span style="color:black;">{args["env_version"]}</span></td>
            </tr>
            <tr>
                <td><b>Feedback: </b></td>
                <td><span style="color:white;">**feedback**/</span></td>
            </tr>
            <tr style="line-height:80px;">
                <td><b>Revision required ? : </b></td>
                <td> Yes<span style="color:white;">**r1**/</span>    No<span style="color:white;">**r2**/</span></td>
            </tr>
            <tr>
                <td style="line-height:80px;"><b>Attach document: </b></td>
                <td><span style="color:white;">**signer_attachment**/</span></td>
            </tr>
            <tr style="line-height:120px;">
                <td><b>Agreed: </b></td>
                <td><span style="color:white;">**signature_1**/</span></td>
            </tr>
            </tbody>
        </table>
        </body>
    </html>
  """
def source_to_locale_path(path):
    """
    Return locale resource path for the given source resource path.

    Locale files for .pot templates are actually .po, so the trailing "t"
    is stripped.
    """
    # Match the full ".pot" extension: the original endswith("pot") also
    # truncated unrelated paths such as "depot".
    if path.endswith(".pot"):
        path = path[:-1]
    return path
def is_arraylike(x):
    """
    Determine if `x` is array-like. `x` is index-able if it provides the
    `__len__` and `__getitem__` methods. Note that `__getitem__` should
    accept 1D NumPy integer arrays as an index

    Parameters
    ----------
    x: any
        The value to test for being array-like

    Returns
    -------
    bool
        `True` if array-like, `False` if not
    """
    if not hasattr(x, '__len__'):
        return False
    return hasattr(x, '__getitem__')
import random
import math
def roulette_index(n, random=random):
    """Randomly choose an index from 0...n-1. Choice has a weight of (index+1)."""
    # Draw a ticket from the triangular total n*(n-1)/2, then invert the
    # cumulative weight formula k*(k+1)/2 to recover the winning index.
    ticket = random.randint(0, n * (n - 1) // 2)
    return math.ceil(math.sqrt(2 * ticket + 0.25) - 0.5)
def calculate_interval(start_time, end_time, deltas=None):
    """Calculate the wanted data-series interval from start and end times.

    :param start_time: Start time in seconds from epoch
    :param end_time: End time in seconds from epoch
    :type start_time: int
    :type end_time: int
    :param deltas: Delta configuration to use; hardcoded defaults apply when
        none (or an empty mapping) is provided
    :type deltas: dict(max time range of query in seconds: interval to use
        in seconds)
    :rtype: int - *Interval in seconds*
    """
    span = end_time - start_time
    # An empty/None deltas mapping falls back to the built-in table.
    if not deltas:
        deltas = {
            900: 10,          # 15 min -> 10s
            1800: 30,         # 30 min -> 30s
            259200: 60,       # 3 days -> 1min
            604800: 300,      # 7 days -> 5min
            1209600: 600,     # 14 days -> 10min
            2419200: 900,     # 28 days -> 15min
            4838400: 1800,    # 2 months -> 30min
            9676800: 3600,    # 4 months -> 1hour
            31536000: 7200,   # 12 months -> 3hours
            126144000: 43200, # 4 years -> 12hours
        }
    for limit in sorted(deltas):
        if span <= limit:
            return deltas[limit]
    # 1 day default, or if time range > max configured (4 years default max)
    return 86400
def flatten(x: dict, prefix="", grouped=True) -> dict:
    """Flattens dictionary by a group (one level only).

    Arguments:
        x {dict} -- Dictionary to be flattened.

    Keyword Arguments:
        prefix {str} -- Group prefix to flatten by. (default: {''})
        grouped (bool) -- True if parameters are internally grouped by key. (default: {True})

    Returns:
        dict -- New flattened dictionary.
    """
    flat = {}
    if grouped:
        # Each top-level value is itself a dict; merge its entries under
        # "<prefix><group key><inner key>".
        for group_key, group in x.items():
            for inner_key, inner_value in group.items():
                flat[f"{prefix}{group_key}{inner_key}"] = inner_value
    else:
        for key, value in x.items():
            flat[f"{prefix}{key}"] = value
    return flat
from typing import Mapping
def _parse_columns(columns):
"""Expects a normalized *columns* selection and returns its
*key* and *value* components as a tuple.
"""
if isinstance(columns, Mapping):
key, value = tuple(columns.items())[0]
else:
key, value = tuple(), columns
return key, value | 9b6f368fa6af69fdbf149799f040eaa3df6a811b | 685,400 |
from bs4 import BeautifulSoup
import re
def review_to_wordlist(review):
    """
    Convert a document to a sequence of words.
    Returns a list of words.
    """
    # Strip HTML markup first.
    plain_text = BeautifulSoup(review).get_text()
    # Replace every non-letter with a space.
    plain_text = re.sub("[^a-zA-Z]", " ", plain_text)
    # Lower-case and tokenise on whitespace.
    return plain_text.lower().split()
def _IsNamedTuple(x):
"""Returns whether an object is an instance of a collections.namedtuple.
Examples::
_IsNamedTuple((42, 'hi')) ==> False
Foo = collections.namedtuple('Foo', ['a', 'b'])
_IsNamedTuple(Foo(a=42, b='hi')) ==> True
Args:
x: The object to check.
"""
return isinstance(x, tuple) and hasattr(x, '_fields') | 93c7438f5472c8d8b95b48aaced4833e0632c3c7 | 685,404 |
def MaybeAddColor(s, color):
    """Wrap the input string in xterm green escape codes, if color is set."""
    return f'\033[92m{s}\033[0m' if color else s
def _memory_usage(df):
"""Return the total memory usage of a DataFrame"""
return df.memory_usage(deep=True).sum() | 595b947d75df08b34bcaaf72b3023a5e86268d45 | 685,408 |
from typing import List
def assert_empty_line_between_description_and_param_list(
    docstring: List[str]
) -> List[str]:
    """
    Ensure exactly one empty line separates the description from the
    parameter list.

    Finds the first ``:param``/``:return``/``:raises`` line in the docstring
    and, when a description precedes it, collapses the blank lines directly
    above it into a single one.

    :param docstring: list of lines in docstring
    :return: list of lines in docstring (the input list, modified in place)
    """
    prefixes = [":param", ":return", ":raises"]
    start_of_param_list = -1
    for i in range(len(docstring)):
        line = docstring[i].strip()
        # check if it starts with prefix
        for prefix in prefixes:
            # i > 1 requires at least two lines above the tag, i.e. an
            # actual description; a tag on line 0 or 1 is left alone.
            if line.startswith(prefix) and i > 1:
                start_of_param_list = i
                break
        if start_of_param_list != -1:
            break
    if start_of_param_list == -1:
        return docstring
    # Drop every blank line immediately above the tag block, then re-insert
    # exactly one empty separator line.
    # NOTE(review): if every line above the tag block were blank, the index
    # would reach 0 and docstring[-1] would be inspected -- presumably the
    # first line is always a non-empty summary; confirm with callers.
    while docstring[start_of_param_list - 1].strip() == "":
        docstring.pop(start_of_param_list - 1)
        start_of_param_list -= 1
    docstring.insert(start_of_param_list, "")
    return docstring
def token_sub(request):
    """Returns the sub to include on the user token."""
    return getattr(request, 'param', None)
def offset_error(x_gt, x_pred, loss_mask):
    """
    Masked offset error for global-coordinate data.

    inputs:
    - x_gt: ground truth future data. tensor. size: (batch, seq_len, 2)
    - x_pred: future data prediction. tensor. size: (batch, seq_len, 2)
    - loss_mask: 0-1 mask on prediction range. size: (batch, seq_len, 1)
      Masks like [0, 0, ..., 1, 1, .., 1, 0., 0] are supported.
    outputs:
    - oe: (batch, seq_len)
    """
    # Euclidean distance between prediction and ground truth per timestep.
    squared_diff = (x_gt - x_pred) ** 2.
    distance = squared_diff.sum(dim=2) ** (0.5)  # (batch, seq_len)
    # Zero out timesteps outside the prediction range.
    return distance * loss_mask.squeeze(dim=2)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.