content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def filter_to_sentry(event, hint):
    """Decide whether a log event should be forwarded to Sentry (sentry.io).

    The flag lives in event['extra']['to_sentry'], set via the `extra`
    keyword of the LogRecord; events are sent by default.

    Example use case:
        current_app.logger.error(
            err_formatted, exc_info=ex, extra={'to_sentry': True})
    """
    if event['extra'].get('to_sentry', True):
        return event
    return None
def _capture_callback(x):
"""Validate the passed options for capturing output."""
if x in [None, "None", "none"]:
x = None
elif x in ["fd", "no", "sys", "tee-sys"]:
pass
else:
raise ValueError("'capture' can only be one of ['fd', 'no', 'sys', 'tee-sys'].")
return x | 79a905c5793fabe43475ecc8a46c162f01060250 | 683,372 |
def get_new_sol_file(test_nr):
    """Return the path of the new solution file for test number `test_nr`."""
    return f"test/{test_nr}-new.sol"
import re
def unwrap_wrapped_text(text):
    """Join hard-wrapped lines into single-line paragraphs.

    Paragraphs (runs separated by two or more newlines) are kept apart,
    each emitted on its own output line.
    """
    trim_edges = re.compile(r'^[ \t]*|[ \t]*$', re.MULTILINE)
    paragraphs = re.split(r'\n{2,}', text)
    return '\n'.join(trim_edges.sub('', p).replace('\n', ' ')
                     for p in paragraphs)
def test_hindcast_verify_brier_logical(hindcast_recon_1d_ym):
    """Test that a probabilistic score requiring a binary observations and
    probability initialized inputs gives the same results whether passing logical
    as kwarg or mapping logical before for hindcast.verify()."""
    he = hindcast_recon_1d_ym
    # Binarize: the event is "value above 0.5".
    def logical(ds):
        return ds > 0.5
    # Route 1: verify() applies `logical` itself and reduces the member dim.
    brier_logical_passed_as_kwarg = he.verify(
        metric="brier_score",
        comparison="m2o",
        logical=logical,
        dim="member",
        alignment="same_verif",
    )
    # Route 2: binarize first, average members into a probability, then
    # verify with no remaining dim to reduce (dim=[]).
    brier_logical_mapped_before_and_member_mean = (
        he.map(logical)
        .mean("member")
        .verify(metric="brier_score", comparison="e2o", dim=[], alignment="same_verif")
    )
    # Route 3: binarize first but keep members; verify reduces them.
    brier_logical_mapped_before_no_member_mean = he.map(logical).verify(
        metric="brier_score", comparison="m2o", dim="member", alignment="same_verif"
    )
    # All three routes must agree element-wise.
    assert (
        brier_logical_mapped_before_and_member_mean == brier_logical_passed_as_kwarg
    ).all()
    assert (
        brier_logical_mapped_before_no_member_mean == brier_logical_passed_as_kwarg
    ).all() | f1d3dd92f65610ead5d154ffdc7d456364907ffb | 683,387 |
import re
def could_be_content_page(url: str) -> bool:
    """
    Heuristically guess whether `url` points at a content page.

    Not a perfect check, but it filters out URLs that are obviously
    non-content: auth, account, search, and legal/policy pages.
    """
    url = url.lower().rstrip('/')
    non_content_suffixes = (
        '/signin', '/login', '/login-page', '/logout',
        '/my-account', '/my-wishlist',
        '/search', '/archive',
        '/privacy-policy', '/cookie-policy', '/terms-conditions',
        '/tos',
    )
    if url.endswith(non_content_suffixes):
        return False
    if re.search('/(lost|forgot)[_-]password$', url):
        return False
    if re.search('/terms[_-]of[_-](service|use)$', url):
        return False
    # Yei, it might be a content page
    return True
def collide_rect(sprite1, sprite2):
    """
    **pyj2d.sprite.collide_rect**
    Check whether the rects of the two sprites intersect.
    Can be used as spritecollide callback function.
    """
    rect1, rect2 = sprite1.rect, sprite2.rect
    return rect1.intersects(rect2)
def compute_image_data_statistics(data_loader):
    """
    Return the channel-wise mean and std deviation of all images yielded
    by `data_loader` (batches of (images, bboxes, labels) tuples).
    """
    channel_mean = 0.
    channel_std = 0.
    total_samples = 0.
    for images, _bboxes, _labels in data_loader:
        batch_size = images.size(0)
        # Flatten the spatial dims so stats are taken per channel.
        flat = images.view(batch_size, images.size(1), -1)
        channel_mean += flat.mean(2).sum(0)
        channel_std += flat.std(2).sum(0)
        total_samples += batch_size
    return channel_mean / total_samples, channel_std / total_samples
def int_to_roman(n):
    """
    Convert a non-negative integer to its standard Roman numeral string.
    """
    values = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    symbols = ("M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I")
    parts = []
    for value, symbol in zip(values, symbols):
        count, n = divmod(n, value)
        parts.append(symbol * count)
    return "".join(parts)
def _get_module_ver_hash(prov):
"""Get module commit hash, falling back to semantic version, and finally 'UNKNOWN'"""
ver = None
subacts = prov[0].get('subactions')
if subacts:
ver = subacts[0].get('commit')
if not ver:
ver = prov[0].get('service_ver', 'UNKNOWN')
return ver | 7e80448df01512d69256c71efd24b4a69f736fc8 | 683,397 |
def get_nominal_conc(df, species):
    """
    Select the nominal hector concentration rows for one species.

    Parameters
    ----------
    df : Pandas DataFrame
        Output from a nominal hector run (has a 'variable' column).
    species : str
        Species to retrieve output for (case-insensitive).

    Returns
    -------
    Pandas DataFrame with only the rows for the requested species.
    """
    species = species.upper()
    # Atmospheric CO2 concentration is stored under the 'Ca' variable name.
    if species == 'CO2':
        species = 'Ca'
    return df.loc[df['variable'] == species]
def gpib_control_ren(library, session, mode):
    """Controls the state of the GPIB Remote Enable (REN) interface line, and optionally the remote/local
    state of the device.
    Corresponds to viGpibControlREN function of the VISA library.
    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param mode: Specifies the state of the REN line and optionally the device remote/local state.
                 (Constants.GPIB_REN*)
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Thin pass-through: all validation and behavior live in the C library call.
    return library.viGpibControlREN(session, mode) | 4d9fc21bb3bca7cbd98c94c064500d0ce319e5cc | 683,409 |
def truncate(string: str, width: int, ending: str = "...") -> str:
    """Shorten `string` to at most `width` characters.

    When truncation happens, `ending` is appended to the shortened
    string as an indication.

    Parameters
    ----------
    string: str
        String to be truncated.
    width: int
        Maximum amount of characters before truncation.
    ending: str, optional
        Indication string of truncation.

    Returns
    -------
    Truncated string.
    """
    if len(string) <= width:
        return string
    keep = width - len(ending)
    return string[:keep] + ending
def split_ver_str(ver_str):
    """Split a dotted version string into its (up to three) numeric components.

    Returns the components as a list of ints, raising RuntimeError when a
    component is not a decimal integer.
    """
    components = ver_str.split('.')[:3]
    for component in components:
        if not component.isdecimal():
            raise RuntimeError("Malformed version string '{}'. "
                               "Component '{}' is not an integer."
                               .format(ver_str, component))
    return [int(component) for component in components]
import torch
def get_first_idx(numel_per_tensor):
    """Returns the first indices of each tensor in the :ref:`packed tensor <packed>`.

    See :ref:`first_idx definition <packed_first_idx>` for more information.

    Args:
        numel_per_tensor (torch.LongTensor): The number of elements
            (vertices, faces, points...) in each unbatched tensor, as a 1D tensor.

    Returns:
        (torch.LongTensor):
            first indices for each unbatched tensor in the packed tensor,
            and the last index + 1, as 1D tensor.

    Example:
        >>> numel_per_tensor = torch.LongTensor([2, 3, 5])
        >>> get_first_idx(numel_per_tensor)
        tensor([ 0,  2,  5, 10])
    """
    count = numel_per_tensor.shape[0]
    first_idx = torch.zeros((count + 1,), dtype=torch.long,
                            device=numel_per_tensor.device)
    # Prefix sums give the start offset of each tensor; slot 0 stays 0.
    first_idx[1:] = torch.cumsum(numel_per_tensor, dim=0)
    return first_idx
def shipping_charge(method, basket, postcode):
    """
    Template tag for calculating the shipping charge for a given shipping
    method and basket, and injecting it into the template context.
    """
    # Delegates entirely to the shipping method's own calculate();
    # presumably returns a price object — confirm against the method API.
    return method.calculate(basket, postcode) | ced6bb9b0029a81540ae3816e5002e376a4b5a3b | 683,417 |
import re
def _re_compile(regex):
"""Compile a string to regex, I and UNICODE."""
return re.compile(regex, re.I | re.UNICODE) | ae312d1f3519171161ce394770c5d7115d8ac348 | 683,422 |
from io import StringIO
def df_to_csv_string(df):
    """Render a pandas DataFrame as a CSV string."""
    buffer = StringIO()
    df.to_csv(buffer, encoding='utf-8')
    return buffer.getvalue()
def left_shift(number, n):
    """
    Left shift on 10 base number.
    Parameters
    ----------
    number : integer
        the number to be shift
    n : integer
        the number of digit to shift
    Returns
    -------
    shifted number : integer
        the number left shifted by n digit
    Examples
    --------
    >>> left_shift(152, 1)
    15
    >>> left_shift(14589, 3)
    14
    """
    # NOTE(review): despite the name, floor-dividing by 10**n DROPS the n
    # rightmost digits — a decimal *right* shift. The doctest examples match
    # this behavior, so only the naming/wording is misleading; callers
    # depend on the current semantics.
    return number // 10 ** n | e1d088fbfc2c64d8a976a15c26ce33b89824ad79 | 683,425 |
import math
def calculate_distance(location1, location2):
    """
    Calculates the great-circle distance between two (lat, lon) pairs
    using the Haversine formula.

    Inputs:
     location1 - [lat, lon] array with first location (degrees)
     location2 - [lat, lon] array with second location (degrees)
    Outputs:
     distance - in kilometers
    """
    # FIX: the previous version took abs() of every coordinate, which
    # collapses hemispheres (e.g. lat -10 and +10 became the same point)
    # and returned wrong distances for points on opposite sides of the
    # equator or prime meridian. Haversine requires signed coordinates.
    lat1 = math.radians(location1[0])
    lon1 = math.radians(location1[1])
    lat2 = math.radians(location2[0])
    lon2 = math.radians(location2[1])
    # haversine formula
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
    c = 2 * math.asin(math.sqrt(a))
    # Radius of earth in kilometers
    r = 6371.0
    return c * r
import re
def rmsp(s):
    """Strip `s`, then collapse runs of spaces into single spaces."""
    return re.sub(" +", " ", s.strip())
def ir(some_value):
    """ Rounds and casts to int
    Useful for pixel values that cannot be floats
    Parameters
    ----------
    some_value : float
        numeric value
    Returns
    --------
    Rounded integer
    Raises
    ------
    ValueError for non scalar types
    """
    # NOTE(review): round() on a non-scalar actually raises TypeError, not
    # ValueError as the docstring claims — confirm before relying on it.
    return int(round(some_value)) | 8b487dd1b3c7a4d1095c70d6112916002642fd73 | 683,428 |
def decibels_to_amplitude_ratio(decibels):
    """The ratio between two amplitudes given a decibel change.

    By definition dB = 20*log10(amplitude_ratio), so the inverse is
    10**(dB/20). (FIX: the previous 2**(dB/10) formula matched neither
    the amplitude convention, 10**(dB/20), nor the power convention,
    10**(dB/10).)
    """
    return 10 ** (decibels / 20)
import requests
def get_stock_data(symbol, token):
    """Send a request to the API with the symbol and token. Return the stock data we want:
    Symbol, Company Name, Current Price"""
    # IEX Cloud quote endpoint; the API token is passed as a query parameter.
    url = f"https://cloud.iexapis.com/stable/stock/{symbol}/quote?token={token}"
    response = requests.get(url)
    # On any non-200 response, dump the body for debugging and signal
    # failure with None (callers must handle the None case).
    if response.status_code != 200:
        print(response.text)
        return None
    all_stock_data = response.json()
    # NOTE(review): assumes these keys are always present on a 200
    # response; a malformed payload would raise KeyError here.
    name = all_stock_data["companyName"]
    symbol = all_stock_data["symbol"]
    current_price = all_stock_data["latestPrice"]
    stock_data = {"name": name, "symbol": symbol, "current_price": current_price}
    return stock_data | ec90c7147b3d1c0c88455ee5ccc159b354deef27 | 683,435 |
def handle_exhibition_desc(company: str, desc: str) -> str:
    """
    Apply per-company formatting fixes to an exhibition description.

    :param company: company name (matched case-insensitively)
    :param desc: company description
    :return: the updated description
    """
    normalized = company.lower()
    if normalized == "mathworks":
        desc = desc.replace(" o ", "\n- ")
    if normalized == "simulation and data lab neuroscience":
        desc = desc.replace("\n•", "\n- ")
    return desc
import click
def soft_nprocs(soft, nprocs):
    """Reduce the number of ranks to the largest acceptable soft value"""
    # If no soft specification given, use -n value
    if not soft:
        return nprocs
    # Filter to values between 1 and nprocs
    try:
        return max([x for x in soft if 0 < x <= nprocs])
    except ValueError:
        # max() on an empty sequence raises ValueError: no candidate fits.
        # pylint: disable=raise-missing-from
        raise click.UsageError("No soft values found between 1 and %d" % nprocs) | 84c60c73cbb5d9e8436ddbef402f2c8ea1d8c096 | 683,439 |
def _IsOverlapping(alert_entity, start, end):
"""Whether |alert_entity| overlaps with |start| and |end| revision range."""
return (alert_entity.start_revision <= end and
alert_entity.end_revision >= start) | 9d9b99fab4a481198d1aaf62bef2c951951f8f91 | 683,443 |
import torch
def GTA_prop_to_hot(img, n_classes: int, width: int, height: int):
    """
    Turn the network's probability output into the most likely onehot map.

    Args:
        img (tensor): (1, n_classes, height, width) probability tensor
            (batch dim 0 is used; only the first sample is converted).
        n_classes (int): Amount of classes in the onehot encoded format.
        width (int): Size width of the given tensor.
        height (int): Size height of the given tensor.

    Returns:
        Tensor: (n_classes, height, width) uint8 onehot encoding.
    """
    # argmax over the class dimension, then scatter 1s along dim 0 —
    # replaces the previous O(height*width) Python double loop.
    labels = torch.argmax(img, dim=1)[0]  # (height, width), dtype long
    onehot = torch.zeros((n_classes, height, width), dtype=torch.uint8)
    onehot.scatter_(0, labels.unsqueeze(0), 1)
    return onehot
import math
def polar2cart(r, x0, y0, theta):
    """Convert polar coordinates (about origin (x0, y0)) to cartesian.

    :param r: Radius
    :param x0: x coordinate of the origin
    :param y0: y coordinate of the origin
    :param theta: Angle in radians
    :return: Cartesian coordinates
    :rtype: tuple (int, int)
    """
    return (int(x0 + r * math.cos(theta)),
            int(y0 + r * math.sin(theta)))
def binary_search(query, array):
    """
    Locate `query` in a sorted `array`.

    Returns the index of `query` if it is present, otherwise -1.

    >>> binary_search(4, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    3
    >>> binary_search(8, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    7
    >>> binary_search(10, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    -1
    """
    low, high = 0, len(array) - 1
    while low <= high:
        middle = low + (high - low) // 2
        candidate = array[middle]
        if candidate == query:
            return middle
        if candidate < query:
            low = middle + 1
        else:
            high = middle - 1
    return -1
import torch
def calculate_output_dim(net, input_shape):
    """Calculates the resulting output shape for a given input shape and network.

    Args:
        net (torch.nn.Module): The network which you want to calculate the output
            dimension for.
        input_shape (int | tuple[int]): The shape of the input being fed into the
            :obj:`net`. Batch dimension should not be included.

    Returns:
        The shape of the output of a network given an input shape.
        Batch dimension is not included.
    """
    shape = (input_shape,) if isinstance(input_shape, int) else tuple(input_shape)
    # A zero-sized batch probes the shape without doing real work.
    probe = torch.zeros((0,) + shape)
    return net(probe).size()[1:]
def markdown_escape_filter(text):
    """Escape special characters in Markdown."""
    # Backslash must be escaped first so it does not re-escape the
    # backslashes introduced for the other characters.
    for special in "\\`*_{}[]()#+-.!|":
        text = text.replace(special, "\\" + special)
    return text
def thousands_separator(value):
    """
    Format a number with thousands separators,
    e.g. 1000000000 -> '1,000,000,000'.

    :param value: number to format
    :return: formatted string
    """
    return f'{value:,}'
from typing import List
import math
def equal_split(s: str, width: int) -> List[str]:
    """
    Split the string into chunks of length `width`; the last chunk may
    be shorter.
    """
    return [s[start:start + width] for start in range(0, len(s), width)]
def prob1(l):
    """Return [minimum, maximum, average] of the numbers in list `l`."""
    average = float(sum(l)) / len(l)
    return [min(l), max(l), average]
def get_ns_name(uri):
    """
    Return the namespace part of a URI: everything up to and including
    the first '#' character, or, when there is no '#', the last '/'.
    """
    cut = uri.find('#')
    if cut == -1:
        cut = uri.rfind('/')
    return uri[:cut + 1]
import struct
def pack_date(date):
    """
    Pack a date (assumed to be UTC) into 4 bytes: big-endian unsigned
    16-bit `year`, one byte `month`, one byte `day`.
    """
    return struct.pack("!H2B", date.year, date.month, date.day)
from typing import Set
def allocate_mid(mids: Set[str]) -> str:
    """
    Allocate a MID which has not been used yet: the smallest
    non-negative integer (as a string) absent from `mids`. The new MID
    is added to the set before being returned.
    """
    candidate = 0
    while str(candidate) in mids:
        candidate += 1
    mid = str(candidate)
    mids.add(mid)
    return mid
def _xor(a,b):
"""Return true iff exactly one of and b are true. Used to check
some conditions."""
return bool(a) ^ bool(b) | 184ded6a4d06e586cdfcf2976e2199f1393c56a0 | 683,479 |
from typing import Any
from typing import get_args
from typing import get_origin
from typing import Literal
def is_str_literal(hint: Any) -> bool:
    """Check whether `hint` is a Literal holding exactly one string value."""
    if get_origin(hint) is not Literal:
        return False
    args = get_args(hint)
    return len(args) == 1 and isinstance(args[0], str)
def get_pitch_at_time(genotype, time):
    """Return the pitch playing at durk point `time`, and whether it ends exactly then.

    Args:
        genotype ((int, int)[]): genotype of chromosome, a list of (pitch, dur).
        time (int): time point in durks.

    Returns:
        (int, bool): (pitch, end_on_time); pitch is -9999 when `time` lies
        past the end of the genotype.
    """
    elapsed = 0
    for pitch, duration in genotype:
        elapsed += duration
        if elapsed > time:
            return (pitch, False)
        if elapsed == time:
            return (pitch, True)
    return (-9999, False)
def _get_source_files(commands):
"""Return a list of all source files in the compilation."""
return list(commands.keys()) | 1d4307e19acdadf06c0d54ba3c984ebcb6604f8b | 683,487 |
def dnode(period):
    """
    Orbit nodal drift per rev relative to one sidereal Earth rotation.
    Orbits below GEO move (drift) Westerly while orbits above GEO move
    Easterly; orbits at GEO are stationary (0 deg drift).

    Arg:
        period [sec]
    Return:
        node precession (dn) [deg]
    """
    sidereal_day = 23 * 3600 + 56 * 60 + 4  # seconds
    return 360 - 360 / sidereal_day * period
def process_search_term(lookup):
    """
    Replace spaces in the search string with '+' so it can be passed
    as part of a URL.

    :param lookup: raw search string
    :return: URL-ready search string
    """
    return lookup.replace(" ", "+")
import torch
def check_gpu(gpu):
    """
    Select the torch device to use for training or inference.

    Command Line Arguments:
        1. gpu (bool): True enables GPU support (CUDA), falling back to CPU
           when no GPU is present; False forces CPU.

    Returns the selected torch.device and prints which device is used.
    (FIX: the previous version had an unreachable trailing `return device`
    after the CPU branch's early return, and typos in the messages.)
    """
    if not gpu:
        print("\nCPU is available...")
        return torch.device("cpu")
    # CUDA requested: use it when present, otherwise fall back to CPU.
    if torch.cuda.is_available():
        print("\nGPU is available...")
        return torch.device("cuda")
    print("\nDevice didn't find GPU, using CPU instead")
    return torch.device("cpu")
import csv
import re
def load_peptides(input_file, peptide_column, column_separator):
    """
    Parse `input_file` and return the set of peptides found in it.
    Peptide strings are cleaned (only A-Z characters retained).

    :param input_file: The file to parse
    :param peptide_column: The column header to extract the peptides from.
    :param column_separator: The separator used for the columns
    :return: A set of strings representing the peptides.
    """
    peptides = set()
    with open(input_file, "r") as handle:
        for row in csv.DictReader(handle, delimiter=column_separator):
            if peptide_column not in row:
                raise Exception("Specified peptide column '" + peptide_column + "' not found in input file.")
            peptides.add(re.sub("[^A-Z]", "", row[peptide_column]))
    return peptides
def input_prompt(prompt):
    """
    Get user input
    """
    # Thin wrapper over the builtin input(); presumably exists so callers
    # and tests can patch this function instead of the builtin — confirm
    # with call sites.
    return input(prompt) | 76724d278cf68ef2b16b7aa35ee61336a373f9ca | 683,497 |
from typing import List
def alphabetical_binary_search(sorted_list: List, search_name: str) -> int:
    """Alphabetical binary search (for study purposes).

    Args:
        sorted_list (List): A list of names (must be ordered).
        search_name (str): name to search.

    Returns:
        int: found index or -1.
    """
    low = 0
    high = len(sorted_list) - 1
    mid = high // 2
    while low <= high:
        current = sorted_list[mid]
        if current == search_name:
            return mid
        if current > search_name:
            high = mid - 1
        else:
            low = mid + 1
        mid = (low + high) // 2
    return -1
import torch
def compute_loss(inputs, outputs, criterion, edge_criterion):
    """Compute loss automatically based on what the model output dict contains.
    'doc_logits' -> document-label CE loss
    'para_logits' -> paragraph-label CE loss
    'pare_edge_weights' -> additional edge CE loss
    """
    if 'para_logits' in outputs:
        # Paragraph-level loss; mask out padded paragraphs (length 0)
        # before summing so padding contributes nothing.
        loss = criterion(outputs['para_logits'], inputs['para_label'])
        loss *= (inputs['para_lengths'] != 0).to(torch.float)
        loss = loss.sum()
    else:
        # Document-level loss.
        loss = criterion(outputs['doc_logits'], inputs['label']).sum()
    if 'para_edge_weights' in outputs:
        # Auxiliary edge loss, down-weighted by a fixed lambda; targets
        # are the flipped paragraph labels (1 - label), same padding mask.
        edge_loss_lambda = 0.1
        edge_loss = edge_criterion(outputs['para_edge_weights'], 1 - inputs['para_label'])
        edge_loss *= (inputs['para_lengths'] != 0).to(torch.float)
        loss += edge_loss_lambda * edge_loss.sum()
    return loss | fac6c23b1654b830d57f9197e22372a90b7d9176 | 683,503 |
import base64
def decode_base64(input_string: str) -> bytes:
    """Decode an unpadded standard or urlsafe base64 string to bytes."""
    raw = input_string.encode("ascii")
    # Restore the '=' padding that was stripped (0-3 characters).
    padded = raw + b"=" * (-len(raw) % 4)
    # altchars makes '-' and '_' valid alongside '+' and '/', so both
    # standard and urlsafe alphabets decode.
    return base64.b64decode(padded, altchars=b"-_")
def normalize_extension(extension):
    """
    Normalize extension
    Converts given extension to canonical format for storage
    :param extension: original extension
    :return: normalized extension
    """
    extension = extension.lower()
    # Map of canonical extension -> known variant spellings.
    # FIX: the original list was missing a comma between 'jfi' and 'jp2',
    # which silently fused them into the bogus variant 'jfijp2', so
    # neither 'jfi' nor 'jp2' was ever normalized.
    variants_by_canonical = {
        'jpg': ['jpeg', 'jpe', 'jif', 'jfif', 'jfi', 'jp2', 'j2k',
                'jpx', 'jpf', 'jpm'],
        'tif': ['tiff'],
        'tar.gz': ['tgz'],
    }
    for canonical, variants in variants_by_canonical.items():
        if extension in variants:
            return canonical
    return extension
def parseTrackLog(line):
    """Parse trackLog line and return important fields: db, year, month, hgsid,
    and a list of tracks"""
    #### Sample line being processed ####
    # [Sun Mar 05 04:11:27 2017] [error] [client ###.###.###.##] trackLog 0 hg38 hgsid_### cytoBandIdeo:1,cloneEndCTD:2
    ####
    # Everything before 'trackLog' is the Apache log prefix; everything
    # after is the payload ("<ver> <db> <hgsid> [track:vis,...]").
    splitLine = line.strip().split('trackLog')
    prefix = splitLine[0].split()
    # Per the sample above, prefix tokens are
    # ['[Sun', 'Mar', '05', '04:11:27', '2017]', ...]:
    month = prefix[1]
    year = prefix[4].replace("]","")
    suffix = splitLine[1].split()
    # suffix tokens: log-version, db, hgsid, optional comma-joined tracks.
    db = suffix[1]
    hgsid = suffix[2]
    if len(suffix) > 3:
        activeTracks = suffix[3]
        tracks = activeTracks.split(",")
    else:
        # No active tracks listed on this line.
        tracks = []
    return db, year, month, hgsid, tracks | ac4b1d373671c526f3e9d1cade9648e94c8c9611 | 683,512 |
import re
def getWords(text):
    """Return the list of word tokens (runs of \\w characters) found in `text`."""
    return re.findall(r'\w+', text)
def sqdist(point1, point2, rotmat):
    """
    Squared anisotropic distance between two 3D points.

    The delta vector is projected through each row of the rotation
    matrix and the squared projections are accumulated. This method
    only considers a single anisotropy scenario.

    Parameters
    ----------
    point1 : tuple
        Coordinates of first point (x1,y1,z1)
    point2 : tuple
        Coordinates of second point (x2,y2,z2)
    rotmat : 3*3 ndarray
        matrix of rotation for this structure

    Returns
    -------
    sqdist : scalar
        The squared distance accounting for the anisotropy
        and the rotation of coordinates (if any).
    """
    delta = (point1[0] - point2[0],
             point1[1] - point2[1],
             point1[2] - point2[2])
    total = 0.0
    for row in range(3):
        projected = (rotmat[row, 0] * delta[0] +
                     rotmat[row, 1] * delta[1] +
                     rotmat[row, 2] * delta[2])
        total += projected * projected
    return total
def _is_path_within_scope(scope, fullpath):
"""Check whether the given `fullpath` is within the given `scope`"""
if scope == '/':
return fullpath is not None
fullpath = fullpath.lstrip('/') if fullpath else ''
scope = scope.strip('/')
return (fullpath + '/').startswith(scope + '/') | 44bba2f38cb095adf60e6950ee8e0f3c1fe72c7b | 683,523 |
def age_interp(request):
    """Fixture for age_interp flag."""
    # Indirect pytest fixture: the value comes from request.param, which
    # is presumably supplied via params=... on the fixture registration or
    # indirect parametrize elsewhere — confirm against the test suite.
    return request.param | ff76fa87502a4712a78af556e7bcaa93327ae9bb | 683,524 |
def avoids(word, forbidden):
    """Predicate: True when `word` contains none of the letters in `forbidden`."""
    return all(letter not in word for letter in forbidden)
def get_vocabulary(path_to_vocab):
    """
    Return a list of prefixes defining the vocabulary, one per line of
    the file at `path_to_vocab` (trailing whitespace stripped).
    """
    with open(path_to_vocab) as handle:
        return [line.rstrip() for line in handle]
def b_delta(rewards, states, alpha):
    """
    Rescorla-Wagner (delta) learning rule.

    V initial is 0. Note: Null (0 or '0') states are silently skipped.
    Returns two dictionaries containing value and RPE timecourses,
    for each state.
    """
    unique_states = set(states)
    values = {state: [0.] for state in unique_states}
    rpes = {state: [] for state in unique_states}
    for reward, state in zip(rewards, states):
        # Terminal/null states carry no learning update.
        if state == 0 or state == '0':
            continue
        current = values[state][-1]
        # The delta rule: prediction error scaled by the learning rate.
        prediction_error = reward - current
        values[state].append(current + alpha * prediction_error)
        rpes[state].append(prediction_error)
    return values, rpes
def get_weight_shapes(num_inputs, layer_sizes, num_outputs):
    """
    Return (weight, bias) shape tuples for a dense network with the
    given input size, hidden layer sizes, and output size.

    Adapted from the original tf_model.get_weight_shapes() method,
    converted to a standalone function.
    """
    shapes = []
    fan_in = num_inputs
    for size in layer_sizes:
        shapes.extend([(fan_in, size), (size,)])
        fan_in = size
    shapes.extend([(fan_in, num_outputs), (num_outputs,)])
    return shapes
import warnings
def format_channel_id(ch):
    """ Render a `idelib.dataset.Channel` or `SubChannel` as a
    "channel.subchannel" ID string (the other information is shown in
    the rest of the table).

    :param ch: The `idelib.dataset.Channel` or `idelib.dataset.SubChannel`
        to format.
    :return: "<parent>.<id>" for subchannels, "<id>.*" for channels, or
        `str(ch)` (with a warning) when the IDs are unusable.
    """
    try:
        parent = ch.parent
        if parent:
            return f"{parent.id}.{ch.id}"
        return f"{ch.id}.*"
    except (AttributeError, TypeError, ValueError) as err:
        warnings.warn(f"format_channel_id({ch!r}) raised {type(err).__name__}: {err}")
        return str(ch)
def _IsBold(weight):
    """Is this weight considered bold?
    Per Dave C, only 700 will be considered bold.
    Args:
        weight: Font weight.
    Returns:
        True if weight is considered bold, otherwise False.
    """
    # Deliberately strict equality: heavier weights (e.g. 800) are NOT bold here.
    return weight == 700 | c0f1305ba836216e0533f73598149f20d2eb8a3d | 683,534 |
def add_attributes(rsrc_id, manifest):
    """Return `manifest` augmented with default attributes.

    The proid is the part of `rsrc_id` before the first '.'; values
    already present in `manifest` override the defaults.
    """
    defaults = {
        'proid': rsrc_id[:rsrc_id.find('.')],
        'environment': 'prod',
    }
    return {**defaults, **manifest}
def setup_walkers(cfg_emcee, params, level=0.1):
    """Initialize walkers for emcee.
    Parameters
    ----------
    cfg_emcee: dict
        Configuration parameters for emcee.
    params: asap.Parameter object
        Object for model parameters.
    level: float, optional
        Perturbation level used when not sampling from the prior.
    Returns
    -------
    ini_positions: numpy array with (N_walker, N_param) shape
        Initial positions of all walkers.
    """
    # Initialize the walkers
    if cfg_emcee['ini_prior']:
        # Use the prior distributions for initial positions of walkers.
        return params.sample(nsamples=cfg_emcee['burnin_n_walker'])
    # Otherwise jitter around the current parameter values by `level`.
    return params.perturb(nsamples=cfg_emcee['burnin_n_walker'], level=level) | 7a03d5f451a71f60acd64e7b22e852af99a9cefe | 683,536 |
def pack_4_4(x: int, y: int) -> int:
    """Pack two 4-bit values into an 8-bit value.

    `x` becomes the high nibble and `y` the low nibble; both must be in
    range 0..15 inclusive. The result is in range 0..255 inclusive.
    """
    assert 0 <= x <= 15
    assert 0 <= y <= 15
    return x * 16 + y
def _GetLibMetadata(layer):
    """ Return a dictionary of library-specific data found in layer."""
    # The /GLOBAL prim is a code-generation convention: its customData
    # carries the settings (at minimum 'libraryName').
    globalPrim = layer.GetPrimAtPath('/GLOBAL')
    if not globalPrim:
        raise Exception("Code generation requires a \"/GLOBAL\" prim with "
            "customData to define at least libraryName. GLOBAL prim not found.")
    if not globalPrim.customData:
        raise Exception("customData is either empty or not defined on /GLOBAL "
            "prim. At least \"libraryName\" entries in customData are required "
            "for code generation.")
    # Return a copy of customData to avoid accessing an invalid map proxy during
    # template rendering.
    return dict(globalPrim.customData) | d2117d0057a57eaeb2ef7ec88ec329c0d19e7848 | 683,538 |
def build_system_info(platform=None,
                      platform_type=None,
                      accel_type=None,
                      cpu_cores=None,
                      cpu_type=None,
                      cpu_sockets=None):
    """Information about the system the test was executed on.

    Args:
        platform (str): Higher level platform, e.g. aws, gce, or workstation.
        platform_type (str): Type of platform, DGX-1, p3.8xlarge, or z420.
        accel_type (str, optional): Type of accelerator, e.g. K80 or P100.
        cpu_cores (int, optional): Number of physical cpu cores.
        cpu_type (str, optional): Type of cpu.
        cpu_sockets (int, optional): Number of sockets

    Returns:
        `dict` with system info (only the keys whose arguments were set).
    """
    system_info = {}
    if platform:
        system_info['platform'] = platform
    if platform_type:
        system_info['platform_type'] = platform_type
    if accel_type:
        system_info['accel_type'] = accel_type
    if cpu_cores:
        system_info['cpu_cores'] = cpu_cores
    if cpu_type:
        system_info['cpu_type'] = cpu_type
    # FIX: this guard previously checked `cpu_type` (copy-paste error), so
    # cpu_sockets was silently dropped unless cpu_type was also provided.
    if cpu_sockets:
        system_info['cpu_sockets'] = cpu_sockets
    return system_info
import re
def strip_spaces(string):
    """Remove all whitespace characters from a string.

    Parameters
    ----------
    string: str

    Returns
    -------
    str
    """
    return re.sub(r'\s+', '', string)
def hexint_parser(arg: str) -> int:
    """Parse a '0x'-prefixed hexadecimal string into an integer."""
    if arg.startswith("0x"):
        return int(arg, 16)
    raise Exception("Received non-hex integer where hex expected")
def orbtell(orb):
    """Return the current read-head position of an orb connection.

    :param orb: open connection object exposing a ``tell()`` method.
    :return: whatever ``orb.tell()`` reports as the current position.
    """
    position = orb.tell()
    return position
import random
def random_bbox(config):
    """Sample a random box (top, left, height, width) inside the image.

    The box has the fixed size ``config.HEIGHT`` x ``config.WIDTH`` and its
    top-left corner is drawn uniformly at random while keeping the box
    within the configured vertical/horizontal margins.

    Args:
        config: Config with IMG_SHAPES, VERTICAL_MARGIN, HEIGHT,
            HORIZONTAL_MARGIN, WIDTH attributes.

    Returns:
        tuple: (top, left, height, width)
    """
    img_height, img_width = config.IMG_SHAPES[0], config.IMG_SHAPES[1]
    # Largest admissible top/left so the fixed-size box stays inside the
    # margins on all sides.
    top_max = img_height - config.VERTICAL_MARGIN - config.HEIGHT
    left_max = img_width - config.HORIZONTAL_MARGIN - config.WIDTH
    top = int(random.uniform(config.VERTICAL_MARGIN, top_max))
    left = int(random.uniform(config.HORIZONTAL_MARGIN, left_max))
    return top, left, config.HEIGHT, config.WIDTH
from datetime import datetime
def from_epoch(seconds):
    """Convert seconds since the Unix epoch to a naive UTC datetime.

    Args:
        seconds: Seconds since epoch (int, float, or numeric string).

    Returns:
        Naive ``datetime`` representing the timestamp in UTC.
    """
    # datetime.utcfromtimestamp() is deprecated since Python 3.12.  The
    # recommended replacement converts via an aware UTC datetime; stripping
    # tzinfo keeps the returned value identical for existing callers.
    from datetime import timezone
    return datetime.fromtimestamp(float(seconds), tz=timezone.utc).replace(tzinfo=None)
def _word_feats(words):
"""
NLTK word feature generator for the NaiveBayesClassifier
"""
return dict([(word, True) for word in words]) | 19601ae61f23f1a9743443fa51d58d5a6479426d | 683,558 |
def build_machine(network=None,
                  machine_type=None,
                  preemptible=None,
                  service_account=None,
                  boot_disk_size_gb=None,
                  disks=None,
                  accelerators=None,
                  labels=None,
                  cpu_platform=None,
                  nvidia_driver_version=None,
                  enable_stackdriver_monitoring=None):
  """Build a VirtualMachine object for a Pipeline request.

  Every argument is optional; unspecified fields are emitted as None.

  Args:
    network (dict): Network details for the pipeline to run in.
    machine_type (str): GCE Machine Type string for the pipeline.
    preemptible (bool): Use a preemptible VM for the job.
    service_account (dict): Service account configuration for the VM.
    boot_disk_size_gb (int): Boot disk size in GB.
    disks (list[dict]): List of disks to mount.
    accelerators (list[dict]): List of accelerators to attach to the VM.
    labels (dict[string, string]): Labels for the VM.
    cpu_platform (str): The CPU platform to request.
    nvidia_driver_version (str): The NVIDIA driver version to use when
      attaching an NVIDIA GPU accelerator.
    enable_stackdriver_monitoring (bool): Enable stackdriver monitoring
      on the VM.

  Returns:
    An object representing a VirtualMachine.
  """
  # dict() with keyword arguments keeps the camelCase API field names in
  # one place while avoiding repeated quoting.
  return dict(
      network=network,
      machineType=machine_type,
      preemptible=preemptible,
      serviceAccount=service_account,
      bootDiskSizeGb=boot_disk_size_gb,
      disks=disks,
      accelerators=accelerators,
      labels=labels,
      cpuPlatform=cpu_platform,
      nvidiaDriverVersion=nvidia_driver_version,
      enableStackdriverMonitoring=enable_stackdriver_monitoring,
  )
def copy_event_attributes(ev1, ev2):
    """Copy all attribute values from one roxar event onto another.

    Values are copied only when both events have the same type; otherwise
    ``ev1`` is returned unaltered.

    Args:
        ev1: roxar event to copy into
        ev2: roxar event to copy attributes from

    Returns:
        ``ev1``, updated in place when the event types match.
    """
    if ev1.type != ev2.type:
        return ev1
    for key in ev1.attribute_keys:
        ev1[key] = ev2[key]
    return ev1
def group_by(object_list, key_function):
    """
    Group the elements of `object_list` by the key that `key_function`
    returns for each element.

    `object_list` does not need to be sorted; within each group the
    original list order is preserved.

    >>> group_by([1, 2, 3, 4, 5], lambda x: x % 2)
    {0: [2, 4], 1: [1, 3, 5]}
    """
    grouped = {}
    for item in object_list:
        # setdefault creates the bucket on first sight of a key.
        grouped.setdefault(key_function(item), []).append(item)
    return grouped
def mass_hpa_tail_boom(
        length_tail_boom,
        dynamic_pressure_at_manuever_speed,
        mean_tail_surface_area,
):
    """
    Mass of a tail boom structure of a human powered aircraft (HPA),
    following Juan Cruz's correlations in
    http://journals.sfu.ca/ts/index.php/ts/article/viewFile/760/718

    Assumes a tubular tail boom of high modules (E > 228 GPa) graphite/epoxy.

    :param length_tail_boom: length of the tail boom [m]. Calculated as
        distance from the wing 1/4 chord to the furthest tail surface.
    :param dynamic_pressure_at_manuever_speed: dynamic pressure at
        maneuvering speed [Pa]
    :param mean_tail_surface_area: mean of the areas of the tail surfaces
        (elevator, rudder)
    :return: mass of the tail boom [m]
    """
    # Baseline structural weight as a function of boom length alone.
    structural_term = (length_tail_boom * 1.14e-1
                       + length_tail_boom ** 2 * 1.96e-2)
    # Correction for tail loading relative to the reference q*S of 78.5.
    load_factor = 1 + ((dynamic_pressure_at_manuever_speed
                        * mean_tail_surface_area) / 78.5 - 1) / 2
    return structural_term * load_factor
def find_first2(l, pred1, pred2):
    """
    Find the first occurrence in a list satisfying a two-step predicate.

    :param l: list.
    :param pred1: predicate on a single list element (used only when the
        list has exactly one element).
    :param pred2: predicate on two adjacent list elements.
    :return: for lists of two or more elements, the first index with
        pred2(l[index-1], l[index]); for a one-element list, 0 if
        pred1(l[0]) holds; otherwise the length of the list.
    """
    size = len(l)
    if size == 0:
        return 0
    if size == 1:
        # Single element: fall back to the one-argument predicate.
        return 0 if pred1(l[0]) else 1
    for idx in range(1, size):
        if pred2(l[idx - 1], l[idx]):
            return idx
    return size
from datetime import datetime
def meta_from_pid(product_id):
    """Extract the metadata encoded in a Landsat Product Identifier.

    The identifier is underscore-separated:
    sensor_correction_PPPRRR_acqdate_procdate_collection_tier.

    :param product_id: Landsat Product Identifier string.
    :return: dict with product_id, sensor, correction, path, row,
        acquisition_date, processing_date, collection and tier.
    """
    parts = product_id.split("_")
    return {
        "product_id": product_id,
        "sensor": parts[0],
        "correction": parts[1],
        # Third segment packs path (3 digits) and row (3 digits) together.
        "path": int(parts[2][:3]),
        "row": int(parts[2][3:]),
        "acquisition_date": datetime.strptime(parts[3], "%Y%m%d"),
        "processing_date": datetime.strptime(parts[4], "%Y%m%d"),
        "collection": int(parts[5]),
        "tier": parts[6],
    }
import requests
def check_status(datum):
    """Check that both the url and image link are valid URLs and that the
    image link isn't just a redirect.

    :param datum: mapping with "url" and "image" keys.
    :return: True when both links answer HTTP 200 and the image URL is
        served directly (final URL unchanged), else False.
    """
    page_response = requests.get(datum["url"], verify=False)
    if page_response.status_code != 200:
        return False
    image_response = requests.get(datum["image"], verify=False)
    # A differing final URL means the image link was only a redirect.
    return (image_response.status_code == 200
            and image_response.url == datum["image"])
def recursive_find_xml_element( xmlnode, name, _nodes=None ):
    """
    Recursively collect every XML sub-element whose tag equals ``name``,
    such as

        for nd in recursive_find_xml_element( xmlnode, 'TestList' ):
            pass

    The search descends into all children (including matching ones), so
    nested matches are reported too; ``xmlnode`` itself is never included.

    :param xmlnode: element whose subtree is searched.
    :param name: tag name to match.
    :param _nodes: internal accumulator; leave as None when calling.
    :return: list of matching elements in depth-first order.
    """
    # `is None` instead of `== None`: identity check cannot be hijacked by
    # a custom __eq__ and is the idiomatic sentinel test.
    if _nodes is None:
        _nodes = []
    for nd in xmlnode:
        if nd.tag == name:
            _nodes.append( nd )
        recursive_find_xml_element( nd, name, _nodes )
    return _nodes
from typing import Callable
from typing import Union
import torch
from typing import Dict
from typing import List
from typing import Tuple
from typing import Set
from typing import Any
def _apply_to_tensors(
fn: Callable, container: Union[torch.Tensor, Dict, List, Tuple, Set]
) -> Any:
"""Recursively apply to all tensor in different kinds of container types."""
def apply(x: Union[torch.Tensor, Dict, List, Tuple, Set]) -> Any:
if torch.is_tensor(x):
return fn(x)
elif isinstance(x, dict):
return {key: apply(value) for key, value in x.items()}
elif isinstance(x, (list, tuple, set)):
return type(x)(apply(el) for el in x)
else:
return x
return apply(container) | 7c04c81da5626808dc9f70eba3f8882ea032798f | 683,577 |
def r10s(factor: float = 1) -> float:
    """
    The horizontal screw spacing on the mounting rails of a 10-inch half
    rack, scaled by *factor*.
    """
    base_spacing = 236.525
    return base_spacing * factor
import yaml
def read_experiment_config(config_file):
    """
    Read the experiment configuration yml file used to set up the
    optimization.

    The yml file lists the parameters and whether each is a fixed parameter
    (with a value) or a range parameter (with a range).

    Parameters
    ----------
    config_file : str
        File path for the experiment configuration file

    Returns
    -------
    loaded_configs: dict
    """
    with open(config_file, "r") as yml_config:
        loaded_configs = yaml.safe_load(yml_config)
    # Ax expects every parameter dict to carry its own "name" attribute.
    for param_name in loaded_configs.get("parameters", {}):
        loaded_configs["parameters"][param_name]["name"] = param_name
    # Ax also wants the parameters flattened into a list.
    loaded_configs["search_space_parameters"] = list(
        loaded_configs.get("parameters", {}).values()
    )
    return loaded_configs
def unknown_labels(dim):
    """
    The labels for the "unknown" basis: there are none, so this always
    returns an empty list.

    Parameters
    ----------
    dim : int
        Dimension (unused by this basis).

    Returns
    -------
    list
    """
    labels = []
    return labels
import torch
from typing import Sequence
from typing import cast
def project_point_cloud_to_map(
    xyz_points: torch.Tensor,
    bin_axis: str,
    bins: Sequence[float],
    map_size: int,
    resolution_in_cm: int,
    flip_row_col: bool,
):
    """Bins an input point cloud into a map tensor with the bins equaling the
    channels.
    This code has been adapted from https://github.com/devendrachaplot/Neural-SLAM.
    # Parameters
    xyz_points : (x,y,z) pointcloud(s) as a torch.Tensor of shape (... x height x width x 3).
        All operations are vectorized across the `...` dimensions.
    bin_axis : Either "x", "y", or "z", the axis which should be binned by the values in `bins`.
        If you have generated your point clouds with any of the other functions in the `point_cloud_utils`
        module you almost certainly want this to be "y" as this is the default upwards dimension.
    bins: The values by which to bin along `bin_axis`, see the `bins` parameter of `np.digitize`
        for more info.
    map_size : The axes not specified by `bin_axis` will be be divided by `resolution_in_cm / 100`
        and then rounded to the nearest integer. They are then expected to have their values
        within the interval [0, ..., map_size - 1].
    resolution_in_cm: The resolution_in_cm, in cm, of the map output from this function. Every
        grid square of the map corresponds to a (`resolution_in_cm`x`resolution_in_cm`) square
        in space.
    flip_row_col: Should the rows/cols of the map be flipped? See the 'Returns' section below for more
        info.
    # Returns
    A collection of maps of shape (... x map_size x map_size x (len(bins)+1)), note that bin_axis
    has been moved to the last index of this returned map, the other two axes stay in their original
    order unless `flip_row_col` has been called in which case they are reversed (useful as often
    rows should correspond to y or z instead of x).
    """
    # Index (0, 1, or 2) of the coordinate being discretized into channels.
    bin_dim = ["x", "y", "z"].index(bin_axis)
    # Remember the original leading ("batch") dims so the output can be
    # reshaped back at the end; collapse them into one for vectorized work.
    start_shape = xyz_points.shape
    xyz_points = xyz_points.reshape([-1, *start_shape[-3:]])
    num_clouds, h, w, _ = xyz_points.shape
    # Reorder coordinates so the binned axis comes last ("w" of u,v,w);
    # flip_row_col reverses the order of the two remaining axes.
    if not flip_row_col:
        new_order = [i for i in [0, 1, 2] if i != bin_dim] + [bin_dim]
    else:
        new_order = [i for i in [2, 1, 0] if i != bin_dim] + [bin_dim]
    uvw_points = cast(
        torch.Tensor, torch.stack([xyz_points[..., i] for i in new_order], dim=-1)
    )
    # len(bins) boundaries partition the line into len(bins)+1 buckets.
    num_bins = len(bins) + 1
    # NaN in the first coordinate marks an invalid point (whole row is
    # treated as invalid below).
    isnotnan = ~torch.isnan(xyz_points[..., 0])
    # Discretize: the two spatial axes are converted to integer grid cells
    # at `resolution_in_cm`, while the bin axis is bucketized by `bins`.
    uvw_points_binned: torch.Tensor = torch.cat(
        (
            torch.round(100 * uvw_points[..., :-1] / resolution_in_cm).long(),
            torch.bucketize(
                uvw_points[..., -1:].contiguous(), boundaries=uvw_points.new(bins)
            ),
        ),
        dim=-1,
    )
    # Per-coordinate exclusive upper bounds, broadcastable against the
    # binned points: (map_size, map_size, num_bins).
    maxes = (
        xyz_points.new()
        .long()
        .new([map_size, map_size, num_bins])
        .reshape((1, 1, 1, 3))
    )
    # A point is valid when all its binned coords are in range and it was
    # not NaN.
    isvalid = torch.logical_and(
        torch.logical_and(
            (uvw_points_binned >= 0).all(-1), (uvw_points_binned < maxes).all(-1),
        ),
        isnotnan,
    )
    # Prefix each flattened point with the index of the cloud it came from,
    # giving rows of (cloud, u, v, bin).
    uvw_points_binned_with_index_mat = torch.cat(
        (
            torch.repeat_interleave(
                torch.arange(0, num_clouds).to(xyz_points.device), h * w
            ).reshape(-1, 1),
            uvw_points_binned.reshape(-1, 3),
        ),
        dim=1,
    )
    # Zero out invalid rows so the flat-index computation below stays in
    # range; they contribute 0 weight to bincount anyway.
    uvw_points_binned_with_index_mat[~isvalid.reshape(-1), :] = 0
    # Flatten (cloud, u, v, bin) into a single linear index per point.
    ind = (
        uvw_points_binned_with_index_mat[:, 0] * (map_size * map_size * num_bins)
        + uvw_points_binned_with_index_mat[:, 1] * (map_size * num_bins)
        + uvw_points_binned_with_index_mat[:, 2] * num_bins
        + uvw_points_binned_with_index_mat[:, 3]
    )
    ind[~isvalid.reshape(-1)] = 0
    # Weighted bincount: each valid point adds 1 to its (cloud, u, v, bin)
    # cell; invalid points carry weight 0.
    count = torch.bincount(
        ind.view(-1),
        isvalid.view(-1).long(),
        minlength=num_clouds * map_size * map_size * num_bins,
    )
    # Restore the original batch dims around the (map, map, bins) output.
    return count.view(*start_shape[:-3], map_size, map_size, num_bins)
def check_sorted(a):
    """Determine whether the list *a* is sorted in non-decreasing order.

    Empty and single-element lists count as sorted.

    :param a: list of mutually comparable items.
    :return: True when every element is >= its predecessor, else False.
    """
    # Idiomatic pairwise check; all() short-circuits on the first violation
    # just like the original index loop did.
    return all(x <= y for x, y in zip(a, a[1:]))
def extract_properties_values_from_json(data, keys):
    """Extracts properties values from the JSON data.

    .. note::
        Each key/value pair in JSON is conventionally referred to as a
        "property"; see the
        `JSON Schema documentation <https://json-schema.org/understanding-json-schema/reference/object.html>`_.

    For example, with::

        data = {'verb': 'GET', 'endpoint': 'users', 'host': 'http://localhost:8080'}
        keys = ('verb', 'endpoint', 'host')

    the result is ``('GET', 'users', 'http://localhost:8080')``.  Keys that
    are absent from *data* are silently skipped; the output order follows
    *keys*.

    :param dict data: An arbitrary data.
    :param tuple|list|set keys: Iterable with values of type `str`.
    :returns: Packaged values.
    :rtype: `tuple`
    """
    values = []
    for key in keys:
        if key in data:
            values.append(data[key])
    return tuple(values)
def tcl_str(string: str = '') -> str:
    """ Return the string wrapped in Tcl braces, padded with single spaces.

    :param string: Python string.
    """
    # Doubled braces in the f-string produce literal { and }.
    return f' {{{string}}} '
from datetime import datetime
def create_identifier(hint: str = '') -> str:
    """
    Build a timestamp-based identifier, useful for creating unique file
    names by exploiting the uniqueness of the current date.

    Be aware that two identifiers created during the same second are equal!
    Follows the form YYYY_MM_DD__hh_mm_ss.

    :return: YYYY_MM_DD__hh_mm_ss_{hint}
    """
    stamp = datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
    if hint:
        return f"{stamp}_{hint}"
    return stamp
from typing import List
def sieve(n: int) -> List[int]:
    """
    A simple implementation of the
    http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes

    :param n: Maximum value to search up to, not included.
    :return: List of primes up to but not including n.
    """
    is_prime = [True] * n
    # Only candidates up to sqrt(n) need to be used for crossing-off.
    for candidate in range(2, int(n ** 0.5 + 1)):
        if not is_prime[candidate]:
            continue
        # Start at candidate^2: smaller multiples were already crossed off.
        for multiple in range(candidate * candidate, n, candidate):
            is_prime[multiple] = False
    return [value for value in range(2, n) if is_prime[value]]
def isValidID(id: str) -> bool:
    """ Check for valid ID: not None and containing no '/'.

    An empty string is accepted (a pi element's ID might be "").
    """
    if id is None:
        return False
    return '/' not in id
import math
def vershik_kerov_logan_shepp(n):
    """
    Asymptotic value of ℓn for large n, namely 2·sqrt(n).

    For a permutation σ∈Sn, ℓ(σ) denotes the maximal length of an
    increasing subsequence in σ, and ℓn = (1/n!) * ∑(σ∈Sn) ℓ(σ) is the
    average of ℓ(σ) over a uniformly random σ from Sn.

    Parameters
    ----------
    n : int
        The n in the equation above; a float is accepted only when it is
        integer-valued.

    return : float
        Asymptotic value of ℓn.
    """
    # Reject non-integer-valued inputs, mirroring the documented contract.
    if n != int(n):
        raise ValueError("n must be integer")
    return 2 * math.sqrt(n)
def flip_corner(corner: tuple) -> tuple:
    """
    Flip a tuple of a variable amount of sides.

    Sides are swapped pairwise (N <-> W, E <-> S); any character that is
    not one of 'N', 'E', 'S', 'W' is dropped from the result.

    :param corner: tuple with number of sides
    :return: flipped clock-wise tuple
    """
    swap = {'N': 'W', 'E': 'S', 'S': 'E', 'W': 'N'}
    return tuple(swap[side] for side in corner if side in swap)
def format_import(names):
    """Format an import line.

    :param names: iterable of 3-tuples whose second item is the module name
        and whose third item is an optional alias (or None).
    :return: a single line such as ``"import os, sys as system\\n"``.
    """
    rendered = [
        name if asname is None else f"{name} as {asname}"
        for _, name, asname in names
    ]
    return "import " + ", ".join(rendered) + "\n"
def count_leading_spaces(string: str) -> int:
    """
    Count the number of space characters at the start of a string.

    Only ' ' counts; a tab or any other character ends the run.

    :param string: input string
    :return: number of spaces
    """
    count = 0
    for char in string:
        if char != " ":
            break
        count += 1
    return count
def replace_special_quotes(html_str: str):
    """
    Replace typographic ("special") quote characters with their named HTML
    character references.

    :param html_str: HTML text possibly containing typographic quotes.
    :return: the text with each special quote replaced by its entity.
    """
    # Standard named character references for each typographic quote.
    # The previous version used names that do not exist in HTML
    # ("&obquo;", "&usbquo;", "&ensquo;"), replaced some quotes with
    # themselves, and mapped the double low quote to a raw character.
    entities = {
        '\u201c': '&ldquo;',   # “ left double quotation mark
        '\u201d': '&rdquo;',   # ” right double quotation mark
        '\u2018': '&lsquo;',   # ‘ left single quotation mark
        '\u2019': '&rsquo;',   # ’ right single quotation mark
        '\u201e': '&bdquo;',   # „ double low-9 quotation mark
        '\u201a': '&sbquo;',   # ‚ single low-9 quotation mark
        '\u2039': '&lsaquo;',  # ‹ single left-pointing angle quotation mark
        '\u203a': '&rsaquo;',  # › single right-pointing angle quotation mark
    }
    for char, entity in entities.items():
        html_str = html_str.replace(char, entity)
    return html_str
def bash_array(lst):
    """Convert a python list [a, b, c] to a bash array literal "(a b c)"."""
    return '(' + ' '.join(str(item) for item in lst) + ')'
import re
def get_length(s):
    """ Determine the length of the string from its name, which is
    prepended as:

        "foobar%d" % N

    :param s: name string, e.g. ``"foobar12"``.
    :return: the trailing integer N, or 0 when the name has no trailing
        digits.
    """
    # Raw string avoids the invalid "\d" escape (SyntaxWarning on modern
    # Python); the character-class brackets around \d were redundant.
    x = re.search(r"\d+$", s)
    # there can be only one or no match here
    return int(x.group(0)) if x else 0
from typing import Optional
import re
def _try_parse_port(port_str: str) -> Optional[int]:
"""Tries to extract the port number from `port_str`."""
if port_str and re.match(r"^[0-9]{1,5}$", port_str):
return int(port_str)
return None | 0dac3e8a0979c8218526ded3b788d5877061c9d4 | 683,623 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.