content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import grp
def get_gid_from_group(group):
    """Look up the GID for a group name.

    :param group: group name to resolve.
    :return: the numeric GID (int) if the group exists, otherwise None.
    """
    try:
        group_entry = grp.getgrnam(group)
    except KeyError:
        # No group with that name in the system database.
        return None
    return group_entry.gr_gid
from pathlib import Path
def image_file_path(instance, filename, ext='.jpg'):
    """Build the upload path for a user's profile image.

    The original *filename* is discarded; the file is stored under
    ``profile_pics/<username><ext>``.

    Args:
        instance (object): object exposing ``instance.user.username``.
        filename (str): current name of the file (unused).
        ext (str): suffix for the stored file, default ``'.jpg'``.

    Returns:
        str: new file path.
    """
    target = Path('profile_pics') / instance.user.username
    return str(target.with_suffix(ext))
def merge_variables(variables, name=None, **kwargs):
    """Merge/concatenate a list of variables along the row axis.

    Parameters
    ----------
    variables : :obj:`list`
        Variables to merge. All must be of the same class and share the
        same ``source``.
    name : :obj:`str`
        Optional output name (currently unused; the class-specific merge
        keeps the input variables' name).
    kwargs
        Extra keyword arguments forwarded to the class-specific ``merge()``.

    Returns
    -------
    A single variable of the same class as the inputs.

    Raises
    ------
    ValueError
        If the variables are of mixed classes or mixed sources.
    """
    unique_classes = {v.__class__ for v in variables}
    if len(unique_classes) > 1:
        raise ValueError("Variables of different classes cannot be merged. "
                         "Variables passed are of classes: %s" % unique_classes)

    unique_sources = {v.source for v in variables}
    if len(unique_sources) > 1:
        raise ValueError("Variables extracted from different types of files "
                         "cannot be merged. Sources found: %s" % unique_sources)

    merged_cls = next(iter(unique_classes))
    return merged_cls.merge(variables, **kwargs)
def replace_fields(field_list, *pairs):
    """Replace named entries of a field list by (name, source) pairs.

    For each ``(field_name, source)`` pair, the matching entry in
    *field_list* is replaced by the pair itself; the input list is not
    modified.

    >>> replace_fields('one two three'.split(), ('two', 'spam'))
    ['one', ('two', 'spam'), 'three']
    """
    updated = list(field_list)
    for field_name, source in pairs:
        position = field_list.index(field_name)
        updated[position] = (field_name, source)
    return updated
from typing import Optional
def rsubstringstartingwith(sub: str, s: str) -> Optional[str]:
    """Return the part of *s* after the last occurrence of *sub*.

    Returns None when *sub* does not occur in *s*.

    >>> rsubstringstartingwith('://', 'database://foo')
    'foo'
    >>> rsubstringstartingwith('://', 'database://foo://bar')
    'bar'
    """
    position = s.rfind(sub)
    if position == -1:
        return None
    return s[position + len(sub):]
def parse_bool(x, true=('true', 'yes', '1', 'on'), add_true=(),
               false=('false', 'no', '0', 'off'), add_false=()):
    """Parse boolean string.

    Parameters
    ----------
    x : bool, int, float or str
        Value to interpret as a boolean.
    true : list of str
        List of accepted string representations of `True` value.
    add_true : list of str
        Optional list of `True` representations to append to the default list.
    false : list of str
        List of accepted string representations of `False` value.
    add_false : list of str
        Optional list of `False` representations to append to the default list.

    Returns
    -------
    bool
        The parsed boolean value.

    Notes
    -----
    `true` and `false` should always consist of only lowercase strings,
    as all comparisons are done after lowercasing `x`.

    Raises
    ------
    ValueError
        If `x` is not `bool` and not contained either in `true` or `false`.
    """
    if isinstance(x, bool):
        return x
    # Bug fix: the original condition was
    # `isinstance(x, (int, float)) and x == 0 or x == 1`, which due to
    # operator precedence accepted ANY object comparing equal to 1,
    # bypassing the numeric type check.
    if isinstance(x, (int, float)) and x in (0, 1):
        return bool(x)
    x = str(x).lower()
    if add_true:
        true = (*true, *add_true)
    if add_false:
        false = (*false, *add_false)
    if x in true:
        return True
    if x in false:
        return False
    raise ValueError("Value '{}' can not be interpreted as boolean".format(x))
def get_space_from_string(space_str):
    """Convert a size string with an M, G, T or P suffix to an int.

    Plain numeric strings are converted directly; otherwise the numeric
    prefix is scaled by the matching power of 1024 (M = 1024, G = 1024**2,
    and so on).
    """
    units = (('M', 1024),
             ('G', 1024 ** 2),
             ('T', 1024 ** 3),
             ('P', 1024 ** 4))
    for suffix, multiplier in units:
        if suffix in space_str:
            return int(float(space_str.split(suffix)[0]) * multiplier)
    return int(space_str)
async def read_vlq(stream):
    """Read a VLQ (variable-length quantity) from a stream.

    :param stream: A stream object, with readexactly() defined.
    :return: int, bytes: The parsed value and the raw bytes consumed.
    """
    consumed = bytearray()
    value = 0
    while True:
        chunk = await stream.readexactly(1)
        consumed += chunk
        byte = chunk[0]
        # Low 7 bits carry data; high bit set means more bytes follow.
        value = (value << 7) | (byte & 0x7f)
        if not (byte & 0x80):
            break
    return value, bytes(consumed)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Convert moles to pressure (atm) via the ideal gas law.

    Temperature is taken in kelvin, volume in litres; the result is the
    pressure in atm rounded to the nearest integer (R = 0.0821 L·atm/mol·K).

    Wikipedia reference: https://en.wikipedia.org/wiki/Gas_laws

    >>> moles_to_pressure(0.82, 3, 300)
    90
    >>> moles_to_pressure(8.2, 5, 200)
    10
    """
    gas_constant = 0.0821
    pressure = (moles * gas_constant * temperature) / volume
    return round(pressure)
def get_file_names(in_list):
    """Collect index 1 of every inner list.

    Used by the get_medical_image_list route to pull file names out of
    [image, filename] records.

    :param in_list: list of lists containing patient medical
        images and file names
    :return: list containing file names
    """
    return [record[1] for record in in_list]
import re
def _agg_removing_duplicates(agg_elem):
"""Aggregate while removing the duplicates.
Aggregate the labels or alt labels together as a single element
can have several Wikidata entities that need to be merged.
Args:
agg_elem (list of string): elem to aggregate
Returns:
string: aggregation with the deletion of the duplicates
"""
str_elem = '|'.join(agg_elem)
list_elem = str_elem.split('|')
# Removing the name of WikiData when there is no labels in the languages
regex = re.compile(r'Q[0-9]+')
list_elem = [elem for elem in list_elem if not regex.match(elem)]
list_elem_without_duplicates = list(set(list_elem))
list_elem_without_duplicates.sort()
elem_without_duplicates = '|'.join(list_elem_without_duplicates)
return elem_without_duplicates | f0b17d9acdd97452a728ed990f8150a72cbbe0f8 | 685,980 |
def get_aps_in_grid_inorder(ue_location, neighboring_aps_in_grid_unordered):
    """Return the neighboring APs sorted by increasing UE–AP distance.

    Points are (x, y) pairs; squared Euclidean distance is used as the
    sort key (monotonic with true distance, no sqrt needed).
    """
    def squared_distance(ap):
        dx = ap[0] - ue_location[0]
        dy = ap[1] - ue_location[1]
        return dx * dx + dy * dy

    return sorted(neighboring_aps_in_grid_unordered, key=squared_distance)
import re
def is_git_error(txt):
    """
    Whether response from the git command includes error

    Looks for a line containing 'fatal' or 'CONFLICT' (multi-line,
    case-insensitive), or 'error' anywhere in the text.

    :param str txt:
    :return: True if the output looks like a git error
    :rtype: bool
    """
    found = re.findall(r'^(.*?(\bfatal\b)[^$]*)$', txt, re.I | re.MULTILINE) \
        or re.findall(r'^(.*?(\bCONFLICT\b)[^$]*)$', txt, re.I | re.MULTILINE) \
        or re.findall(r'^(.*?(\berror\b)[^$]*)$', txt, re.I)
    # Bug fix: the docstring promises bool, but the function returned the
    # raw findall list; coerce to an actual bool.
    return bool(found)
import shutil
def del_dir(directory):
    """Delete *directory* recursively.

    :param directory: path of the directory tree to remove.
    :return: 0 on success, 1 if the tree could not be removed.
    """
    try:
        shutil.rmtree(directory)
        return 0
    except OSError:
        # Bug fix: shutil.rmtree raises FileNotFoundError/OSError, never
        # FileExistsError, so the original handler could never fire and
        # a missing directory crashed instead of returning 1.
        return 1
def writeonly(func):
    """Decorator that marks an API function as write-only.

    Tags the function with a ``_write_only_`` attribute and returns it
    unchanged.
    """
    setattr(func, '_write_only_', True)
    return func
def calc_l2distsq(x, y):
    """Compute the squared L2 distance between tensors x and y.

    The difference is squared elementwise, flattened per batch item, and
    summed, yielding one value per row of the leading dimension.
    """
    squared_diff = (x - y) ** 2
    flattened = squared_diff.view(squared_diff.shape[0], -1)
    return flattened.sum(dim=1)
def parse_string_to_list(line):
    """Parse a CSV-style line into a list of stripped, non-empty strings.

    None maps to an empty list; newlines are treated as ordinary spaces.
    """
    if line is None:
        return []
    flattened = line.replace('\n', ' ').replace('\r', ' ')
    stripped = (field.strip() for field in flattened.split(','))
    return [field for field in stripped if field]
from typing import Optional
import re
def parseResourceId(resource_uri: str, resource_type: str) -> Optional[str]:
    """Extract the resource ID that follows ``<resource_type>/`` in a URI.

    Returns the first path segment after the type, or None when the type
    does not appear in the URI.
    """
    pattern = '{}/([^/]+)'.format(resource_type)
    match = re.search(pattern, resource_uri)
    return match.group(1) if match else None
def _get_manifest_path(repository, manifest=None):
"""Return the path for a manifest, or list of manifests if manifest is empty.
"""
if manifest:
return '/acr/v1/{}/_manifests/{}'.format(repository, manifest)
return '/acr/v1/{}/_manifests'.format(repository) | 3f6d6e368805651fc78fefd64ea51d081aaeb77d | 686,003 |
def replicate_z_samples(t, n_z_samples):
    """Replicate tensor *t* ``n_z_samples`` times along a new first dim.

    Uses expand, so the replicas are broadcast views sharing storage.
    """
    with_leading_dim = t.unsqueeze(0)
    return with_leading_dim.expand(n_z_samples, *t.shape)
def make_lookup_phrase(row):
    """Join row[1] and row[2] (name and address) for a Google geo text search."""
    name, address = row[1], row[2]
    return "{} {}".format(name, address)
from typing import Tuple
def _func_to_class_and_method(fn) -> Tuple[str, str]:
"""Returns the names of the function's class and method."""
split = fn.__qualname__.split('.')
if len(split) >= 2:
class_name = split[-2]
method_name = split[-1]
else:
module_name = fn.__module__
class_name = module_name.split('.')[-1]
method_name = fn.__name__
return class_name, method_name | 1549230164fd1d923fcd3f475ba4626893f71081 | 686,009 |
def hex_to_rgb(_hex):
    """
    Convert a HEX color representation to an RGB color representation.

    hex :: hex -> [000000, FFFFFF]

    :param _hex: The 3- or 6-char hexadecimal string representing the
        color value (with or without a leading '#').
    :return: RGB representation of the input HEX value.
    :rtype: tuple
    """
    value = _hex.strip('#')
    width = len(value) // 3
    channels = [value[i * width:(i + 1) * width] for i in range(3)]
    if len(value) == 3:
        # Short form: each digit is doubled, e.g. 'abc' -> 'aabbcc'.
        channels = [c * 2 for c in channels]
    r, g, b = (int(c, 16) for c in channels)
    return r, g, b
def check_streams(streams='*'):
    """
    Checks that the streams given are a list containing only possible streams, or is all streams - '*'.
    """
    possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
    if streams == '*':
        return possible_streams
    if not isinstance(streams, list):
        raise ValueError(f"Streams could not be recognised, must be one of: {', '.join(possible_streams)}")
    unrecognised_streams = list(set(streams) - set(possible_streams))
    if unrecognised_streams:
        unrecognised_streams_to_print = ", ".join(f"'{stream}'" for stream in unrecognised_streams)
        raise ValueError(f"Streams {unrecognised_streams_to_print} could not be recognised, must be one of: {', '.join(possible_streams)}")
    return streams
import copy
def is_subsequence(seq1, seq2):
    """Check if seq1 is a subsequence of seq2.

    Each element of seq1 must be a subset of some element of seq2, in
    order (elements of seq2 may be skipped).

    >>> is_subsequence(((2,), (3, 5)), ((2, 4), (3, 5, 6), (8,)))
    True
    >>> is_subsequence(((1,), (2,)), ((1, 2), (3, 4)))
    False
    >>> is_subsequence(((2,), (4,)), ((2, 4), (2, 4), (2, 5)))
    True
    """
    remaining = list(seq1)
    for candidate in seq2:
        if remaining and set(remaining[0]) <= set(candidate):
            remaining.pop(0)
    return not remaining
def truncate(source, max_len: int, el: str = "...", align: str = "<") -> str:
    """Return a truncated string.

    :param source: The string (or int) to truncate; None/empty yields "".
    :param max_len: The total length of the string to be returned.
    :param el: The ellipsis characters appended when the string exceeds max_len.
    :param align: The alignment for the string if it does not exceed max_len.
    :return: The truncated string.
    """
    # Ints are accepted and rendered as their decimal string (bool excluded).
    if type(source) is int:
        source = str(source)
    if source is None or len(source) == 0:
        return ""
    if len(source) < max_len:
        return source
    if max_len < len(el) + 1:
        # Not enough room for the ellipsis: pad/trim a single character.
        return "{s:{c}{a}{n}}".format(s=source[0], c=".", a=align, n=max_len)
    if len(source) > max_len:
        return source[:max_len - len(el)] + el
    return source
def is_parser_function(string):
    """Return True iff *string* is a MediaWiki parser function.

    See https://www.mediawiki.org/wiki/Help:Extension:ParserFunctions.
    A leading '#' is used as the heuristic — close enough for our needs.
    """
    return string.startswith('#')
def get_scores(database, person, multi=True):
    """
    Return a string representing the scores a person has in the database.

    The parameter `multi' is to specify whether the scores are used for
    displaying the scores of a single person or multiple people (indented).
    """
    prefix = '  ' if multi else ''
    lines = []
    for score, points in database[person].items():
        lines.append(prefix + '{score}: {points}\n'.format(score=score, points=points))
    return ''.join(lines)
def split_b64_file(b64_file):
    """Separate the data type and data content from a b64-encoded string.

    Args:
        b64_file: file encoded in base64, e.g. ``"image/png;base64,AAAA"``.

    Returns:
        tuple: of bytes ``(content_type, data)``. If the ``;base64,``
        separator is absent, ``data`` is empty.
    """
    # Bug fix: the docstring promises a 2-tuple, but split() returned a
    # list (of length 1 when the separator was missing). partition()
    # always yields exactly two usable parts.
    content_type, _, data = b64_file.encode('utf8').partition(b';base64,')
    return content_type, data
def _dequantized_var_name(var_name):
"""
Return dequantized variable name for the input `var_name`.
"""
return "%s.dequantized" % (var_name) | 5bb8747a5681ed6bd4dce8ba726c260f8b32bace | 686,020 |
def arcs2d(arcs):
    """Convert arcseconds into degrees (3600 arcseconds per degree)."""
    seconds_per_degree = 3600.0
    return arcs / seconds_per_degree
def hamming_distance(a, b):
    """Return the Hamming distance between two strings of equal length.

    Prints a diagnostic and re-raises AssertionError when the lengths
    differ (callers rely on catching AssertionError).
    """
    try:
        assert len(a) == len(b)
    except AssertionError as error:
        print('Barcode lengths are not equal for {}. {}'.format(a, b))
        raise error
    return sum(x != y for x, y in zip(a, b))
def tf_binary(dtm):
    """
    Transform raw count document-term-matrix `dtm` to binary term frequency matrix. This matrix contains 1 whenever
    a term occurred in a document, else 0.

    :param dtm: (sparse) document-term-matrix of size NxM (N docs, M is vocab size) with raw term counts.
    :return: (sparse) binary term frequency matrix of type integer of size NxM
    """
    if dtm.ndim != 2:
        raise ValueError('`dtm` must be a 2D array/matrix')
    occurred = dtm > 0
    return occurred.astype(int)
def get_slurm_url(slurm_config: dict, url_type: str):
    """Build the Slurm REST url for reading nodes or jobs info.

    Args:
        slurm_config (dict): Slurm configuration with 'ip', 'port',
            'slurm_nodes' and 'slurm_jobs' entries.
        url_type: Url type. nodes or jobs

    Raises:
        ValueError: if *url_type* is neither 'nodes' nor 'jobs'.
    """
    url_types = ['nodes', 'jobs']
    if url_type not in url_types:
        raise ValueError(f"Invalid url type. Expected one of: {url_types}")
    base_url = f"http://{slurm_config['ip']}:{slurm_config['port']}"
    suffix_key = 'slurm_nodes' if url_type == 'nodes' else 'slurm_jobs'
    return f"{base_url}{slurm_config[suffix_key]}"
def unzip(iterable):
    """
    Unzip an iterable of N-tuples into an N-tuple of iterables.

    :param iterable: Any iterable object yielding N-tuples
    :return: A lazy zip object over the transposed tuples
    """
    transposed = zip(*iterable)
    return transposed
def make_matrix(num_rows, num_cols, entry_fn):
    """Build a num_rows x num_cols matrix whose [i][j] entry is entry_fn(i, j)."""
    matrix = []
    for i in range(num_rows):
        row = [entry_fn(i, j) for j in range(num_cols)]
        matrix.append(row)
    return matrix
def get_id_from_url(url: str) -> str:
    """Get the id of the image from the url.

    The url is of the format ``https://sxcu.net/{image_id}``, so the id
    is simply the last path segment.

    Parameters
    ----------
    url : str
        The original url.

    Returns
    -------
    str
        The id of the image.
    """
    return url.rsplit("/", 1)[-1]
def extract_var_key(raw_line: str, var_id: str) -> str:
    """
    Extract the key from a line in the form "dict['key']" or
    "dict.get('key', *args)".

    Returns "" when the line matches neither access pattern.
    """
    line = raw_line.strip()[len(var_id):]
    if line[0] == "[":
        # Subscript form: strip "['" and "']".
        return line[2:-2]
    if line[0:4] == ".get":
        inner = line.split("(")[1].split(")")[0].strip()
        quote = '"' if inner[0] == '"' else "'"
        parts = [piece for piece in inner.split(quote) if len(piece) > 0]
        return parts[0]
    return ""
def inherits_from(obj, a_class):
    """
    Function that determine if a class is an inherited class.

    Args:
        obj (object any type): The object to analyze.
        a_class (object any type): The reference class.

    Returns:
        True if the object is an instance of a class that inherited
        (directly or indirectly) from the specified class, but is not
        an exact instance of it; otherwise False.
    """
    if type(obj) is a_class:
        return False
    return isinstance(obj, a_class)
def get_resource_by_path(path, resources):
    """Find the resource whose 'path' entry matches *path*.

    Args:
        path (str): path to find
        resources (list(dict)): list of resource dicts

    Returns:
        dict: first resource matching the path, None otherwise
    """
    matching = (resource for resource in resources if resource['path'] == path)
    return next(matching, None)
def get_vigra_feature_names(feature_names):
    """
    For the given list of feature names, return the list of feature names to compute in vigra.
    Basically, just remove prefixes and suffixes

    For example: ['edge_vigra_mean', 'sp_vigra_quantiles_25'] -> ['mean', 'quantiles']
    """
    lowered = [name.lower() for name in feature_names]
    # The vigra name is the third '_'-separated token; a set collapses
    # duplicates arising from multiple quantile selections.
    vigra_names = {name.split('_')[2] for name in lowered}
    return list(vigra_names)
def default_wire_map(ops):
    """Create a dictionary mapping used wire labels to non-negative integers.

    Args:
        ops Iterable[Operation]

    Returns:
        dict: map from wires to sequential non-negative integers, in
        first-seen order.
    """
    # A dict (not a set) preserves first-occurrence ordering of wires.
    seen = {}
    for op in ops:
        for wire in op.wires:
            seen.setdefault(wire, None)
    return {wire: index for index, wire in enumerate(seen)}
def bestMutation(shape, model, cycles=50, startHeat=100, heatDiv=1.01, alpha=.5):
    """
    Mutates a shape for a given number of cycles and returns the best scoring change
    :param shape: The shape to mutate (mutated in place; must support mutate()/undoMutate())
    :param model: The model object (must support scoreShape(shape, alpha))
    :param cycles: The number of cycles (attempts at mutation)
    :param startHeat: The initial maximum random number which a point can change by
    :param heatDiv: The amount to divide the heat by every time the shape mutates into a better position
    :param alpha: The alpha value to use when calculating color
    :return: An array representing the best change. [score, color, replacement, bounds]
    """
    # Greedy hill-climb with a cooling schedule: keep a mutation only if it
    # improves the score, otherwise roll it back.
    bestShape = shape
    # Baseline score of the unmutated shape.
    score, color, replacement, bounds = model.scoreShape(bestShape, alpha)
    bestChange = [score, color, replacement, bounds]
    curHeat = startHeat
    for j in range(cycles):
        bestShape.mutate(heat=curHeat)
        score, color, replacement, bounds = model.scoreShape(bestShape, alpha)
        change = [score, color, replacement, bounds]
        if score > bestChange[0]:
            # Improvement: keep the mutation and cool down (smaller steps).
            bestChange = change
            curHeat = int(curHeat / heatDiv)
            # Never cool below 10 so mutations keep some reach.
            curHeat = max(curHeat, 10)
        else:
            # No improvement: revert this mutation before the next attempt.
            bestShape.undoMutate()
    return bestChange
def allow_timefloor(submitmode):
    """
    Should the timefloor mechanism (multi-jobs) be allowed for the given submit mode?

    Currently always True, regardless of the submit mode.

    :param submitmode: submit mode (string).
    """
    return True
def get_base_worker_instance_name(experiment):
    """GCE will create instances for this group in the format
    "w-|experiment|-$UNIQUE_ID". 'w' is short for "worker"."""
    return 'w-{}'.format(experiment)
import torch
def objective(expec_t, look_back=20000):
    """
    Creates the objective function to minimize.

    Args:
        expec_t (list): time-dependent quantum yield
        look_back (int): number of previous time steps
            over which to average the yield

    Returns:
        obj (torch.Tensor): objective function

    Note:
        20,000 time steps = 1 ps, since dt = 0.05 fs,
        so this defaults to averaging the QY over 1 ps.
    """
    trajectory = torch.cat(expec_t)
    recent = trajectory[-look_back:]
    # Maximizing the quantum yield == minimizing its negative mean.
    return -recent.mean()
def compare_dict_keys(dict_1, dict_2):
    """Check that two dicts have exactly the same set of keys."""
    return set(dict_1) == set(dict_2)
def get_price_estimate(client, coordinates):
    """Returns the price estimate data for the given `coordinates`.

    :param client: :class:`UberRidesClient <UberRidesClient>` object.
    :param coordinates: :class:`Coordinates <Coordinates>` object with
        start/end latitude and longitude attributes.
    :return: price estimate data
    :rtype: list of dictionaries
    """
    response = client.get_price_estimates(
        start_latitude=coordinates.start_latitude,
        start_longitude=coordinates.start_longitude,
        end_latitude=coordinates.end_latitude,
        end_longitude=coordinates.end_longitude
    )
    return response.json['prices']
from bs4 import BeautifulSoup
def slide_factory(tag, id: int, classes: list = None):
    """
    Factory for creating slides from a given Beautifulsoup tag, id, and list of classes to use for the slide.

    Args:
        tag (_type_): The tag to put into the slide.
        id (int): The id of the slide (used for the Javascript on the page).
        classes (list, optional): What classes should be on the HTML of the slide tag. Defaults to None (no extra classes).

    Returns:
        _type_: The Beautifulsoup slide tag.
    """
    # Bug fix: the original default was a mutable list ([]), shared across
    # calls; use None as the sentinel instead.
    if classes is None:
        classes = []
    soup = BeautifulSoup()
    slide = soup.new_tag(
        "div", **{"class": "slide " + " ".join(classes), "id": "slide{}".format(id)}
    )
    slide.append(tag)
    return slide
import re
def remove_optional_regex(pattern, name):
    """Removes an optional part of the regex by capture name.

    The part must be of the format '(?:[anything](?P<[name]>[anything])[anything])?'.

    :param pattern: regex pattern string to strip the group from.
    :param name: name of the capture group to remove.
    :return: the pattern with the optional named group removed.
    """
    # Bug fix: the pattern was a non-raw string containing invalid escape
    # sequences like "\(" (DeprecationWarning/SyntaxWarning on modern
    # Python); use a raw string so the regex is passed through verbatim.
    group_pattern = r"\(\?:[^(]*\(\?P<{}>[^)]*\)[^)]*\)\?".format(name)
    return re.sub(group_pattern, "", pattern)
def column_selection(names, columns):
    """
    Select the columns whose label contains any of the given names.

    Args:
        names: substrings to look for.
        columns: column labels to filter.

    Returns:
        list: columns containing at least one of the names.
    """
    return [col for col in columns
            if any(name in col for name in names)]
def _clean_key(root_key, filekey):
"""Return the cleaned key 'filekey', using 'root_key' as the root
Args:
root_key (str): root_key for accessing object store
filekey (str): filename to access in object store
Returns:
str: Location to access the file
"""
if root_key:
return "%s/%s" % (str(root_key), str(filekey))
else:
return str(filekey) | bf39e95a182506900b0cdb110f1fea2566e85520 | 686,068 |
def get_pipe_parent(job):
    """Check if the job has a pipe_from parent and if so return that. If
    the job does not have any pipe sources, the job itself is returned.

    :param job: the job
    :type job: :class:`jip.db.Job`
    :returns: pipe source job or the job itself if no pipe parent is found
    """
    # Walk up the pipe_from chain iteratively until a root is reached.
    current = job
    while current.pipe_from:
        current = current.pipe_from[0]
    return current
def _shift_twelve(number):
"""Shifts the number by 12, if it is less than 0.
Parameters
----------
number : int
Returns
-------
int
"""
return number + 12 if number < 0 else number | cca6689a7caabbaae0e352b4e91b64ebb1f63ad7 | 686,070 |
def get_affiliate_code_from_request(request):
    """
    Helper method that gets the affiliate code from a request object if it exists

    Args:
        request (django.http.request.HttpRequest): A request

    Returns:
        Optional[str]: The affiliate code (or None)
    """
    code = getattr(request, "affiliate_code", None)
    return code
def _first_occurrences(items):
    """Return a list of *items* keeping only the first occurrence of each."""
    seen = set()
    result = []
    for item in items:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result


def nub(l, reverse=False):
    """
    Removes duplicates from a list.

    If reverse is true keeps the last duplicate item
    as opposed to the first.

    Returns a list when reverse is False, and a reversed iterator over the
    result when reverse is True (matching the historical interface).
    """
    # Refactor: the two branches previously duplicated the same dedup loop
    # (using a dict where a set suffices); share one helper instead.
    if reverse:
        return reversed(_first_occurrences(reversed(l)))
    return _first_occurrences(l)
def _validate_bool(value, default):
"""Validate a boolean value parsed from the config file.
@type value: any
@param value: Raw value read from config.
@type default: bool
@param default: Default value to use if the input value isn't valid.
@rtype: bool
@return: Value to use.
"""
if value is not None:
if type(value) is bool:
return value
return default | d14e733086d9e7dc79504086bcbde098a44874ad | 686,078 |
def diff21(n):
    """
    Given an integer n, return the absolute difference between n and 21,
    doubled when n is greater than 21.

    diff21(19) -> 2
    diff21(25) -> 8
    """
    delta = abs(n - 21)
    return delta * 2 if n > 21 else delta
def write_score_summary(scores, analogy_types, filename):
    """
    Write score summary to a string

    :param scores: list of pairs
        (number of correctly answered questions in category, number of questions in category)
    :param analogy_types: category names, parallel to *scores*
    :param filename: file name shown in the header
    :return: score summary (string)
    """
    width = 120
    divider = '-' * width
    out = divider + '\n' + filename + ': score summary' + '\n' + divider + '\n'
    out += '{:<65}\t{:>12}\t{:>12}\t{:>15}\n'.format(
        'Analogy type', '#corr answ', '#questions', 'corr answ [%]')
    for idx, analogy_type in enumerate(analogy_types):
        correct = scores[idx][0]
        total = scores[idx][1]
        percentage = correct * 100.0 / total
        out += '{:<65}\t{:>12}\t{:>12}\t{:>15.4g}\n'.format(
            analogy_type, correct, total, percentage)
    return out
def empty_statement(_evaluator, _ast, _state):
    """Evaluate an empty statement: no value, no state change."""
    return (None, False)
from pathlib import Path
def oss_artifact() -> Path:
    """
    Return the path to a build artifact for DC/OS OSS master.
    """
    artifact = Path('/tmp') / 'dcos_generate_config.sh'
    return artifact
def two_sum(arr, target):
    """Find two distinct elements of *arr* summing to *target*.

    T: O(n) - one pass over the array
    S: O(n) - complements seen so far are kept in a dict

    :param arr: Input array
    :param target: Target sum
    :return: list [later_value, complement] for the first pair found,
        else an empty list
    """
    if len(arr) < 2:
        return []
    # Maps target - value -> seen, for values at EARLIER indices only.
    # Bug fix: the original inserted the current element's complement
    # BEFORE testing membership, which (with the != i self-pair guard)
    # missed duplicate pairs such as two_sum([3, 3], 6).
    complements = {}
    for value in arr:
        if value in complements:
            return [value, target - value]
        complements[target - value] = True
    return []
import torch
def MRR(logits, target):
    """
    Compute mean reciprocal rank.

    :param logits: 2d tensor [batch_size x rel_docs_per_query]
    :param target: 2d tensor [batch_size x rel_docs_per_query] with 1 at
        relevant positions
    :return: mean reciprocal rank [a float value]
    """
    assert logits.size() == target.size()
    _, indices = torch.sort(logits, 1, descending=True)
    total_reciprocal_rank = 0
    for i in range(indices.size(0)):
        for j in range(indices.size(1)):
            # Bug fix: legacy `.data[0]` indexing of a 0-dim tensor was
            # removed in PyTorch 0.4+; use `.item()` to read the scalar.
            if target[i, indices[i, j].item()].item() == 1:
                total_reciprocal_rank += 1.0 / (j + 1)
                break
    return total_reciprocal_rank / logits.size(0)
def update_query_object(query, data, exceptions=None):
    """Iterates over given data object. Set attributes to SQLAlchemy query.

    Args:
        query (obj): SQLAlchemy query object
        data (obj): Given request's arguments from JSON
        exceptions (list): Keys for which iteration
            should be skipped (default: none)

    Returns:
        SQLAlchemy object: Updated query object
    """
    # Bug fix: the default was a mutable list ([]); use None as sentinel.
    skip = set(exceptions) if exceptions else set()
    for key, value in data.items():
        if key in skip:
            continue
        setattr(query, key, value)
    return query
def pv_f(fv, r, n):
    """Estimate the present value of a stream of future cash flows.

    fv : iterable of future cash flows (first flow discounted by period 0)
    r  : rate of discount per period
    n  : num of iterations (unused; period index comes from enumeration)

    Formula per flow:  pv_i = cash_i / (1 + r) ** i

    Returns a 2-tuple with the same present value twice (the original
    interface computed it with two equivalent loops).
    """
    present_value = 0
    for period, cash_flow in enumerate(fv):
        present_value += cash_flow / (1 + r) ** period
    return (present_value, present_value)
def _deep_get(instance, path):
"""
Descend path to return a deep element in the JSON object instance.
"""
for key in path:
instance = instance[key]
return instance | 3dad3eed0115c244ee60e887049747200ed1838c | 686,097 |
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
conn_err_codes = ('2002', '2003', '2006')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False | 0fb14a654443616f44f3b76eb55e3f6d04e7808c | 686,099 |
def linear_full_overlap(dep_t, dep_h):
    """Checks whether both the head and dependent of the triplets match
    (substring containment on positions 0 and 2)."""
    head_matches = dep_h[0] in dep_t[0]
    dependent_matches = dep_h[2] in dep_t[2]
    return head_matches and dependent_matches
def points_2_xywh(box):
    """Convert [xmin, ymin, xmax, ymax] to [xmin, ymin, width, height],
    rounding each value to the nearest int."""
    converted = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
    return [int(round(coord)) for coord in converted]
def _find_appliance(oneandone_conn, appliance):
"""
Validates the appliance exists by ID or name.
Return the appliance ID.
"""
for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
if appliance in (_appliance['id'], _appliance['name']):
return _appliance['id'] | 565a996fec27389c93e857e848ae971ba2bdc709 | 686,104 |
import time
def get_datetime_string(format_string='%m:%d:%Y:%X'):
    """
    Returns a string representation of current (local) time

    :kw format_string: Format string to pass to strftime (optional)
    """
    now = time.localtime()
    return time.strftime(format_string, now)
from functools import reduce
def sanitize_for_latex(text):
    """Sanitize text for use within LaTeX by escaping the special
    characters %, _, & and ~, in order to prevent errors."""
    sanitized = text
    for special in '%_&~':
        sanitized = sanitized.replace(special, '\\' + special)
    return sanitized
def make_free_energy_lines(condition_lambda0,
                           condition_lambda1,
                           alchemical_molecule,
                           lambda_step,
                           starting_lambda=0,
                           couple_intramol='no',
                           sc_alpha=0.5,
                           sc_coul='no',
                           sc_sigma=0.25,
                           sc_power=1,
                           nstdhdl=50,
                           separate_dhdl_file='yes',
                           free_energy='yes'):
    """Build the free-energy control section of a GROMACS mdp file.

    Parameters
    ------------
    condition_lambda0 : str
        the condition of the system for lambda=0;
        the options are 'vdw-q', 'vdw', 'none'
    condition_lambda1 : str
        the condition of the system for lambda=1;
        the options are 'vdw-q', 'vdw', 'none'
    alchemical_molecule : str
        the residue name of the molecule that will be
        annihilated/created
    lambda_step : float
        how much lambda will increase/decrease each timestep
        (can be both positive and negative)
    starting_lambda : int, default=0
        the starting value of lambda (usually 1 or 0)
    couple_intramol : str, optional, default='no'
        check the mdp documentation on the gromacs website;
        don't use this option if you don't know what you are doing
    sc_alpha : float, optional, default=0.5
        check the mdp documentation on the gromacs website
    sc_coul : str, optional, default='no'
        check the mdp documentation on the gromacs website
    sc_sigma : float, optional, default=0.25
        check the mdp documentation on the gromacs website
    sc_power : int, optional, default=1
        check the mdp documentation on the gromacs website
    nstdhdl : int, optional, default=50
        check the mdp documentation on the gromacs website
    separate_dhdl_file : str, optional, default='yes'
        check the mdp documentation on the gromacs website
    free_energy : str, optional, default='yes'
        check the mdp documentation on the gromacs website

    Returns
    ------------
    list of str
        the lines of the mdp file (newline missing)

    Notes
    ----------
    A common (but not compulsory) convention is that lambda goes
    0 -> 1 during creation and 1 -> 0 during annihilation.
    """
    # The exact spacing of each key = value line is kept as-is; GROMACS
    # accepts it and downstream diffs rely on the exact text.
    return [
        '; Free energy control stuff',
        f'free-energy = {free_energy}',
        f'init-lambda = {starting_lambda}',
        f'delta-lambda = {lambda_step}',
        f'couple-moltype = {alchemical_molecule}',
        f'couple-lambda0 ={condition_lambda0}',
        f'couple-lambda1 ={condition_lambda1}',
        f'couple-intramol ={couple_intramol}',
        f'sc-alpha = {sc_alpha}',
        f'sc-coul = {sc_coul}',
        f'sc-sigma = {sc_sigma}',
        f'sc-power = {sc_power}',
        f'nstdhdl = {nstdhdl}',
        f'separate-dhdl-file = {separate_dhdl_file}', ''
    ]
def get_gdrive_id(url):
    """Extract the Google Drive ID from a share link.

    Splits on "/" and takes element 5, which is where Drive file/folder
    links carry the ID; any trailing query string ("?...") is removed.

    Args:
        url: the gdrive link (string).

    Returns:
        str: the extracted ID.
    """
    segment = url.split("/")[5]
    # Drop anything after a query-string delimiter, if present.
    return segment.split("?")[0] if "?" in segment else segment
from typing import List
import re
def split_quoted_string(string_to_split: str, character_to_split_at: str = ",", strip: bool = False) -> List[str]:
    """Split a string while leaving quoted parts intact.

    from: "first, string", second string
    to: ['first, string', 'second string']

    Parameters
    ----------
    string_to_split: str
        string to split into parts
    character_to_split_at: str
        character to split the string at
    strip: bool
        strip whitespace from each part

    Returns
    -------
    list: separated string parts
    """
    # Non-strings cannot be split; wrap them unchanged.
    if not isinstance(string_to_split, str):
        return [string_to_split]
    # Lookahead ensures the split character is followed by an even number of
    # quotes, i.e. it is not inside a quoted section.
    pattern = rf"{character_to_split_at}(?=(?:[^\"']*[\"'][^\"']*[\"'])*[^\"']*$)"
    pieces = re.split(pattern, string_to_split)
    if strip is True:
        pieces = [piece.strip() for piece in pieces]
    return pieces
def postprocess(path, line1, line2):
    """Parse alternating number pairs from lines [line1:line2] of a file.

    Turns data that looks like this:
        100 0.98299944 200 1.00444448 300 0.95629907
    into:
        [[100, 0.98299944], [200, 1.00444448], [300, 0.95629907], ...]

    Args:
        path: path of the text file to read.
        line1: index of the first line to parse (0-based, inclusive).
        line2: index of the last line to parse (exclusive).

    Returns:
        list of [int, float] pairs.
    """
    # Python 3 opens text files with universal newlines by default; the old
    # 'rU' mode was removed in 3.11.  'with' guarantees the file is closed.
    with open(path) as handle:
        subdata = handle.readlines()[line1:line2]
    xx, yy = [], []
    for line in subdata:
        # Even-indexed tokens are x values, odd-indexed tokens are y values.
        for j, token in enumerate(line.split()):
            if j % 2:
                yy.append(token)
            else:
                xx.append(token)
    return [[int(x), float(y)] for x, y in zip(xx, yy)]
import re
def ValidateKeyId(key_id):
    """Ensures a key id is well structured.

    Returns a truthy match object when the whole id consists of lowercase
    letters and digits, otherwise None.
    """
    # Keys are hexadecimal.  fullmatch anchors both ends: re.match only
    # checked the prefix, so e.g. 'abc!!!' used to validate.
    return re.fullmatch(r'[a-z0-9]+', key_id)
def _get_color_definitions(data):
"""Returns the list of custom color definitions for the TikZ file.
"""
definitions = []
fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}"
for name, rgb in data["custom colors"].items():
definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2]))
return definitions | 1341f99b4cf39a20a675d0473bd8f381216f4384 | 686,118 |
def create_multiset_format(n, rs, pfs):
    """Convert the given n, rs and pfs into a multiset format.

    Arguments
    =========
    n: Total length of the permutations
    rs: How many of each non-X element should be present within each permutation
    pfs: The value of each non-X prime factor to order

    Example:
        n = 6, rs = (1,3), pfs = (3,5)
        multiset = ['3','5','5','5','X','X']

    The set is represented as a list here, because the permutation generator
    expects this.

    Raises
    ======
    NotImplementedError: when sum(rs) exceeds n.
    """
    total = sum(rs)
    # Validate before doing any work (was previously checked only after
    # the multiset had already been built).
    if total > n:
        raise NotImplementedError
    multiset = []
    # One run of str(pf) per requested count.
    for count, pf in zip(rs, pfs):
        multiset.extend([str(pf)] * count)
    # Pad with 'X' placeholders up to length n.
    multiset.extend(['X'] * int(n - total))
    return multiset
import logging
def is_db_user_superuser(conn):
    """Test whether the current DB user is a PostgreSQL superuser.

    Args:
        conn: an open DB-API connection to a PostgreSQL database.

    Returns:
        bool: True when pg_roles reports the current user as superuser.
    """
    logger = logging.getLogger('dirbs.db')
    with conn.cursor() as cur:
        cur.execute("""SELECT rolsuper
                         FROM pg_roles
                        WHERE rolname = CURRENT_USER""")
        res = cur.fetchone()
        if res is None:
            # logger.warn is deprecated; warning() is the supported API.
            logger.warning('Failed to find CURRENT_USER in pg_roles table')
            return False
        return res[0]
from typing import Sequence
from typing import Tuple
def my_quat_conjugate(q: Sequence[float]) -> Tuple[float,float,float,float]:
    """Conjugate ("invert"/"reverse") a quaternion by negating x/y/z.

    :param q: 4x float, W X Y Z quaternion
    :return: 4x float, W X Y Z quaternion
    """
    w, x, y, z = q[0], q[1], q[2], q[3]
    return w, -x, -y, -z
from typing import Iterable
from typing import Sequence
def arg_str_seq_none(inputs, name):
    """Simple input handler: normalize a string/iterable/None argument.

    Parameters
    ----------
    inputs : None, string, or iterable of strings
        Input value(s) provided by caller
    name : string
        Name of input, used for producing a meaningful error message

    Returns
    -------
    inputs : None, or list of strings

    Raises
    ------
    TypeError if unrecognized type
    """
    if inputs is None:
        return None
    # A string is itself iterable, so check it before the generic cases.
    if isinstance(inputs, str):
        return [inputs]
    if isinstance(inputs, (Iterable, Sequence)):
        return list(inputs)
    raise TypeError('Input %s: Unhandled type %s' % (name, type(inputs)))
from typing import MutableMapping
from typing import Any
def flatten(
    dictionary: MutableMapping[Any, Any], separator: str = ".", parent_key=False
):
    """
    Turn a nested dictionary into a flattened dictionary

    Parameters:
        dictionary: dictionary
            The dictionary to flatten
        parent_key: boolean
            The string to prepend to dictionary's keys
        separator: string
            The string used to separate flattened keys

    Returns:
        A flattened dictionary
    """
    items = []
    for key, value in dictionary.items():
        new_key = str(parent_key) + separator + key if parent_key else key
        if hasattr(value, "items"):
            items.extend(
                flatten(
                    dictionary=value, separator=separator, parent_key=new_key
                ).items()
            )
        elif isinstance(value, list):
            for k, v in enumerate(value):
                # BUGFIX: new_key was previously passed positionally as the
                # *separator*, so list entries lost their parent prefix.
                items.extend(
                    flatten(
                        {str(k): v}, separator=separator, parent_key=new_key
                    ).items()
                )
        else:
            items.append((new_key, value))
    return dict(items)
def ramp(x):
    """Smooth ramp function from the curl-noise paper.

    Clamps to [-1, 1] outside that interval; inside it evaluates a quintic
    polynomial that joins the clamped regions smoothly.
    """
    if x > 1.0:
        return 1.0
    if x < -1.0:
        return -1.0
    # Quintic smoothing polynomial (kept term-for-term for bit-identical floats).
    return 15.0 / 8.0 * x - 10.0 / 8.0 * x * x * x + 3.0 / 8.0 * x * x * x * x * x
import statistics
def score(text, *score_functions):
    """Score ``text`` using ``score_functions``.

    Examples:
        >>> score("abc", function_a)
        >>> score("abc", function_a, function_b)

    Args:
        text (str): The text to score
        *score_functions (variable length argument list): functions to score with

    Returns:
        Arithmetic mean of scores

    Raises:
        ValueError: If score_functions is empty
    """
    if not score_functions:
        raise ValueError("score_functions must not be empty")
    individual_scores = [fn(text) for fn in score_functions]
    return statistics.mean(individual_scores)
def line_lister(line):
    """Return a list of whitespace-stripped fields from a comma-separated line."""
    return list(map(str.strip, line.split(',')))
def ip_key(key):
    """Make a 'canonical' IP representation for sorting.

    The IP (first element of ``key``) has each subfield zero-padded to three
    digits; the result is returned as a list of those padded fields, eg:
        given ('1.255.24.6', ...) return ['001', '255', '024', '006']
    """
    ip = key[0]
    return ['%03d' % int(field) for field in ip.split('.')]
def to_camel_case(snake_case):
    """Convert a snake_case string to camelCase."""
    first, *rest = snake_case.split('_')
    return first + ''.join(word.title() for word in rest)
import re
def RemoveLocations(test_output):
    """Removes all file location info from a Google Test program's output.

    Args:
        test_output: the output of a Google Test program.

    Returns:
        output with all file location info (in the form of
        'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
        'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
        'FILE_NAME:#: '.
    """
    # Group 1 = file name after the last path separator; group 2 = the
    # ':NN' or '(NN)' line-number suffix, which is replaced by ':#'.
    location_pattern = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
    return re.sub(location_pattern, r'\1:#: ', test_output)
def _ValidateReplicationFlags(flag_dict):
"""Verifies correct usage of the bigtable replication flags."""
return (not flag_dict['bigtable_replication_cluster'] or
flag_dict['bigtable_replication_cluster_zone']) | 1462455171f631cf18dc2e09650df2322d451dd6 | 686,151 |
def text_to_word_sequence(text,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=True, split=" "):
    """Converts a text to a sequence of words (or tokens).

    # Arguments
        text: Input text (string).
        filters: Sequence of characters to filter out.
        lower: Whether to convert the input to lowercase.
        split: Sentence split marker (string).

    # Returns
        A list of words (or tokens).
    """
    if lower:
        text = text.lower()
    # Map every filtered character to the split marker, then tokenize.
    table = str.maketrans(filters, split * len(filters))
    tokens = text.translate(table).split(split)
    return [token for token in tokens if token]
def get_grants(df):
    """Get grant numbers from a dataframe.

    Assumptions:
        Dataframe has a column called 'grantNumber'.

    Returns:
        list: unique non-null grant numbers, sorted ascending.
    """
    print(f"Querying for grant numbers...", end="")
    unique_grants = set(df.grantNumber.dropna())
    print(f"{len(unique_grants)} found\n")
    return sorted(unique_grants)
from typing import Optional
from pathlib import Path
def join_paths(path1: Optional[str] = "", path2: Optional[str] = "") -> str:
    """
    Joins path1 and path2, returning a valid object storage path string.

    Example: "/p1/p2" + "p3" -> "p1/p2/p3"
    """
    # Treat None like the empty string.
    left = (path1 or "").rstrip('/')
    right = path2 or ""
    # Combine and ensure the result does not start with "/".
    combined = f"{left}/{right}".lstrip('/')
    if not combined:
        return combined
    # Normalize to a Posix-style path.
    return Path(combined).as_posix()
import struct
def seq_to_bytes(s):
    """Convert a sequence of integers to a *bytes* instance.

    Good for plastering over Python 2 / Python 3 cracks.  Each value must
    fit in an unsigned byte; struct raises on overflow.
    """
    return struct.pack("%dB" % len(s), *s)
def parse_int_ge0(value):
    """Return ``value`` converted to an int.

    Raises a ValueError if value cannot be converted to an int that is
    greater than or equal to zero.
    """
    value = int(value)
    if value >= 0:
        return value
    raise ValueError(
        'Invalid value [{0}]: require a whole number greater than or '
        'equal to zero'.format(value))
import string
def remove_punct(value):
    """Strip surrounding whitespace and remove all punctuation characters."""
    stripped = value.strip()
    table = str.maketrans('', '', string.punctuation)
    return stripped.translate(table)
import pickle
def default_model_read(modelfile):
    """Default function to read model files; simply uses pickle.load.

    Args:
        modelfile: path of the pickled model file.

    Returns:
        The unpickled object.
    """
    # 'with' closes the handle; the old version leaked the open file.
    with open(modelfile, 'rb') as handle:
        return pickle.load(handle)
def _variant_genotypes(variants, missing_genotypes_default=(-1, -1)):
"""Returns the genotypes of variants as a list of tuples.
Args:
variants: iterable[nucleus.protos.Variant]. The variants whose genotypes we
want to get.
missing_genotypes_default: tuple. If a variant in variants doesn't have
genotypes, this value is returned. The default value is (-1, -1), the
standard representation for "missing" diploid genotypes.
Returns:
list[nucleus.protos.Variant] protos in the same order as variants.
"""
return [
tuple(v.calls[0].genotype) if v.calls else missing_genotypes_default
for v in variants
] | 0e5c280614a5b630406e17bfca36300ff6186226 | 686,180 |
import struct
def byte(number):
    """Pack a number between 0 and 255 (inclusive) into a single byte.

    Use it as a replacement for ``chr`` where you are expecting a byte,
    because this works on all versions of Python.

    Raises :class:``struct.error`` on overflow.

    :param number:
        An unsigned integer between 0 and 255 (both inclusive).
    :returns:
        A single byte.
    """
    # "B" = unsigned char; struct enforces the 0..255 range.
    return struct.pack("B", number)
import random
def generate_events(grid_size, event_type, probability, event_max):
    """
    Generate up to 'event_max' events at random times throughout the night.

    Return events as a list of lists containing the event type and time grid
    index at which the event occurs.

    Example
    -------
    >>> events = generate_events(120, 'Target of Opportunity', 0.1, 4)
    >>> print(events)
    [['Target of Opportunity', 50], ['Target of Opportunity', 20]]

    Parameters
    ----------
    grid_size : int
        number of discrete time values throughout the observing window.
    event_type : str
        type of event ('Target of Opportunity' or 'Condition change' for sky
        conditions changes).
    probability : float
        probability of an event occurring.
    event_max : int
        number of potential events.
    """
    events = []
    # Skip the RNG entirely when events are impossible (preserves the RNG
    # call sequence of the original implementation).
    if probability > 0.:
        for _ in range(event_max):
            # 'Roll the dice' for each potential event (number in [0, 1)).
            if random.random() <= probability:
                # Random time grid index somewhere in the night.
                event_grid_index = random.randint(0, grid_size - 1)
                events.append([event_type, event_grid_index])
    return events
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.