content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def is_anon_user(user):
    """
    Determine whether a user is anonymous.

    Anonymous users are identified by a username starting with
    ``'anon_user_'``.

    :param user: object with a ``username`` string attribute
    :return: True if the username carries the anonymous prefix
    """
    # startswith is clearer than the equivalent slice comparison
    # user.username[0:10] == 'anon_user_'.
    return user.username.startswith('anon_user_')
def identity(x):
    """Return ``x`` unchanged (the identity function)."""
    return x
def merge_dict(lhs, rhs, override=True):
    """Recursively merge the nested dict ``rhs`` into ``lhs`` in place.

    :param lhs: dict to be merged into.
    :type lhs: dict
    :param rhs: dict to merge from.
    :type rhs: dict
    :param override: when True, values from ``rhs`` win on conflicts.
    :type override: boolean
    :return: the merged ``lhs`` (or the winning leaf value at the base case)
    """
    # Base case: once either side is no longer a dict, pick a winner.
    if not (isinstance(lhs, dict) and isinstance(rhs, dict)):
        return rhs if override else lhs
    for key, value in rhs.items():
        if key in lhs:
            # Both sides have the key: recurse to merge the values.
            lhs[key] = merge_dict(lhs[key], value, override)
        else:
            lhs[key] = value
    return lhs
from typing import Union
def create(num: int, pos: int) -> Union[int, None]:
    """Creates a particle at `pos` if possible and returns the new state.

    The state is a binary number; bit ``pos`` set means the site is
    occupied. Creation fails when the site is already occupied.

    Parameters
    ----------
    num : int or Spinstate
        The number representing the binary state.
    pos : int
        The index of the state element.

    Returns
    -------
    new : int or None
        The newly created state, or ``None`` when the site at ``pos``
        is already occupied.
    """
    bit = 1 << pos
    if num & bit:
        # Site already occupied: creation operator annihilates the state.
        return None
    return num | bit
import math
def factorial(x):
    """Return x factorial as an integer"""
    # Delegate to the C-implemented stdlib routine.
    result = math.factorial(x)
    return result
def process_tex(lines):
    """
    Remove unnecessary section titles from the LaTeX file.

    Drops every line that opens a sectioning command whose title starts
    with ``gyptis.`` (auto-generated API headings).

    :param lines: iterable of LaTeX source lines
    :return: list of the remaining lines, in order
    """
    # str.startswith accepts a tuple of prefixes, replacing the
    # five-branch `or` chain of the original.
    skip_prefixes = (
        r"\section{gyptis.",
        r"\subsection{gyptis.",
        r"\subsubsection{gyptis.",
        r"\paragraph{gyptis.",
        r"\subparagraph{gyptis.",
    )
    return [line for line in lines if not line.startswith(skip_prefixes)]
import collections
def frequent_sleeper(sleep_tracker):
    """Return ID that sleeps the most * the minute they sleep most often.

    :param sleep_tracker: dict mapping guard ID (int) -> list of minutes
        during which that guard was recorded asleep
    :return: sleepiest guard's ID multiplied by their most frequent minute
    """
    # Guard with the largest number of recorded asleep-minutes; max keeps
    # the first maximum, matching the original strict-greater scan.
    sleepiest_guard = max(sleep_tracker, key=lambda guard: len(sleep_tracker[guard]))
    # Minute that appears most often in that guard's record.
    minute_counts = collections.Counter(sleep_tracker[sleepiest_guard])
    sleepiest_time = minute_counts.most_common(1)[0][0]
    return sleepiest_guard * sleepiest_time
def prime_check(n):
    """Checks if natural number n is prime.
    Args:
        n: integer value > 0.
    Returns:
        A boolean, True if n prime and False otherwise.
    Raises:
        AssertionError: if n is not an int or not positive.
    """
    assert type(n) is int, "Non int passed"
    assert n > 0, "No negative values allowed, or zero"
    if n == 1:
        return False
    # Trial division by every i with i*i <= n. The original's extra
    # guard `n != i` could never trigger (i*i <= n implies i < n for n > 1)
    # and has been removed.
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def generate_chebyshev_betas(n=10):
    """
    Generate the first n beta coefficients for monic chebyshev polynomials
    Source for the recurrence relation:
    https://www3.nd.edu/~zxu2/acms40390F11/sec8-3.pdf, accessed 11/07/17
    :param n: Number of required coefficients, must be >2
    :return: List of the first n coefficients
    """
    import math
    # beta_0 is the integral of the Chebyshev weight over [-1, 1], i.e. pi.
    # The original literal 3.14158433 appears to be a mistyped pi
    # (pi = 3.14159265...).
    return [math.pi] + [0.5] + [0.25] * (n - 2)
import glob
def find_files(directory, pattern):
    """
    Returns a list of files that matches the pattern in a given directory
    """
    # Build the glob expression and let the stdlib do the matching.
    search_path = directory + '/' + pattern
    return glob.glob(search_path)
def is_glibc_ref(s):
    """
    Return True if s looks like a reference to GLIBC as typically found in
    Elfs.
    """
    # Versioned glibc symbols carry an '@@GLIBC' marker.
    return s.find('@@GLIBC') != -1
def count_genres(row_df):
    """
    A trade off degree based in the user genre count
    :param row_df: A user dataframe row (pandas Series) with the genres
        distribution
    :return: A float that is the fraction of genres with a positive value
    """
    # Series.iteritems() was removed in pandas 2.0; Series.items() is the
    # drop-in replacement with identical semantics.
    count = 0
    for _, number in row_df.items():
        if number > 0.0:
            count += 1
    return count / len(row_df)
def date_match(line, pattern):
    """
    If line matches pattern then return the date as an integer, else None.

    The non-empty capture groups are concatenated and right-padded with
    "00" pairs until at least 12 digits long.
    """
    found = pattern.match(line)
    if not found:
        return None
    digits = "".join(group for group in found.groups() if group)
    # Pad two zeros at a time to fill missing month/day/hour/minute parts.
    while len(digits) < 12:
        digits += "00"
    return int(digits)
def recursive_update(original_dict: dict, new_dict: dict) -> dict:
    """Recursively update original_dict with new_dict (in place)."""
    for key, value in new_dict.items():
        if isinstance(value, dict):
            # Merge nested dicts instead of overwriting them wholesale.
            nested = original_dict.get(key, {})
            original_dict[key] = recursive_update(nested, value)
        else:
            original_dict[key] = value
    return original_dict
def all_subclasses(cls):
    """Recursively returns all the subclasses of the provided class.
    """
    result = set()
    for subclass in cls.__subclasses__():
        result.add(subclass)
        # Fold in the subclass's own descendants.
        result |= all_subclasses(subclass)
    return result
def _is_mxp_footer(line):
"""Returns whether a line is a valid MXP footer."""
return line.strip().startswith('- - - - - - - - - - - - - - - - - -') | 9f2e33b32197de7a83109093cf17c5e4c2a7e705 | 693,333 |
def hostname(fqdn):
    """Return hostname part of FQDN."""
    # Everything before the first dot; the whole string if there is none.
    host, _, _ = fqdn.partition('.')
    return host
def parse_likwid_metrics(file_path, metrics, singlecore=False):
    """
    Reads a single Peano output file and parses likwid performance metrics.
    Args:
    file_path (str):
       Path to the Peano output file.
    metrics (str[]):
       A list of the metrics we want to read out.
    singlecore (bool):
       Specifies if the run was a singlecore run.
    Returns:
       A dict holding for each of the found metrics a nested dict that holds
       the following key-value pairs:
       * 'Sum'
       * 'Avg'
       * 'Min'
       * 'Max'
       Missing values stay at the sentinel -1.0.
    """
    columns = ["Sum", "Min", "Max", "Avg"]
    result = {metric: {column: -1.0 for column in columns} for metric in metrics}
    try:
        # with-statement guarantees the file handle is closed; the original
        # leaked it on every call.
        with open(file_path) as file_handle:
            for line in file_handle:
                for metric in metrics:
                    if singlecore:
                        if metric in line:
                            # | Runtime (RDTSC) [s] | 6.5219 |
                            segments = line.split('|')
                            value = float(segments[2].strip())
                            for column in columns:
                                result[metric][column] = value
                    elif metric + " STAT" in line:
                        # | Runtime (RDTSC) [s] STAT | 27.4632 | 1.1443 | 1.1443 | 1.1443 |
                        segments = line.split('|')
                        result[metric]["Sum"] = float(segments[2].strip())
                        result[metric]["Min"] = float(segments[3].strip())
                        result[metric]["Max"] = float(segments[4].strip())
                        result[metric]["Avg"] = float(segments[5].strip())
    except Exception:
        print("Error: Could not process file '%s'!\n" % (file_path))
        raise
    return result
def convert_to_list(list_or_dict):
    """Wrap a dict in a single-element list, or return the input list as-is."""
    # (The original docstring said "Convert list to dict", which is the
    # opposite of what this function does.)
    if isinstance(list_or_dict, dict):
        return [list_or_dict]
    elif isinstance(list_or_dict, list):
        return list_or_dict
    else:
        raise TypeError(f'Input should be a list or dict. Received {type(list_or_dict)}')
def bound_maker_erfrecterf(amplitude_bounds, translational_offset_bounds, stddev_bounds, vertical_offset_bounds, number_erfs):
    """
    Create tuple with lower and upper bounds to be used in the curve fit
    Args:
        amplitude_bounds (tuple): bounds on the amplitudes of the gaussians
        translational_offset_bounds (tuple): bounds on the translational offsets of the gaussians
        stddev_bounds (tuple): bounds on the standard deviations of the gaussians
        vertical_offset_bounds (tuple): bounds on the vertical offset of the gaussians
        number_erfs (int): the number of erf-rect-erf features in the fit
    Returns:
        bounds (tuple): (lower, upper) lists of bounds on the fit parameters
    """
    def one_side(i):
        # Parameter layout: amplitudes, offsets, widths, offsets, widths
        # (number_erfs of each), then a single vertical offset.
        return (
            [amplitude_bounds[i]] * number_erfs
            + [translational_offset_bounds[i]] * number_erfs
            + [stddev_bounds[i]] * number_erfs
            + [translational_offset_bounds[i]] * number_erfs
            + [stddev_bounds[i]] * number_erfs
            + [vertical_offset_bounds[i]]
        )
    return (one_side(0), one_side(1))
def kth_element(list_a: list, k: int):
    """Problem 3: Find the K'th Element of a List
    Parameters
    ----------
    list_a : list
        The input list
    k : int
        The element to fetch (1-based)
    Returns
    -------
    element
        The k'th element of the input list
    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    ValueError
        If the input list contains fewer than k elements, or the given k is
        less than 1
    """
    if not isinstance(list_a, list):
        raise TypeError('The argument given is not of `list` type.')
    # Length check runs before the k < 1 check, so e.g. k=0 on a non-empty
    # list reports "value of k" rather than "too few elements".
    if len(list_a) < k:
        raise ValueError(f'The input list contains less than [{k}] elements.')
    if k < 1:
        raise ValueError('The value of k cannot be less than 1.')
    return list_a[k - 1]
import statistics
def calculate_median_depths(document: dict) -> dict:
    """
    Calculate the median depth for all hits (sequences) in a Pathoscope result document.
    :param document: the pathoscope analysis document to calculate depths for
    :return: a dict of median depths keyed by hit (sequence) ids
    """
    # One median per hit, keyed by its sequence id.
    return {hit["id"]: statistics.median(hit["align"]) for hit in document["results"]}
def string_to_ord(string):
    """Convert string to corresponding list of int values."""
    return list(map(ord, string))
def problem_8_7(cents):
    """ Given an infinite number of quarters (25 cents), dimes (10 cents),
    nickels (5 cents) and pennies (1 cent), write code to calculate the number
    of ways of representing n cents.
    """
    # Next smaller denomination for each coin value.
    smaller_vals = {
        25: 10,
        10: 5,
        5: 1
    }
    def num_combinations(change, val):
        """ Count the number of combinations of coins <= val summing to change.
        Args:
            change: int,
            val: int, one of 25, 10, 5 or 1
        Returns:
            int, the number of combinations.
        """
        if val == 1:  # Only one way to return change using only pennies.
            return 1
        smaller_val = smaller_vals[val]
        # Use i coins of `val` for i = 0..change // val, and make up the
        # remainder with smaller denominations.
        # Fixes from the original:
        #  * `change / val` is float division on Python 3, which breaks
        #    range(); integer division `//` is required.
        #  * the i == 0 case was counted twice (once as the seed term and
        #    again as the first loop iteration).
        ways = 0
        for i in range(change // val + 1):
            ways += num_combinations(change - i * val, smaller_val)
        return ways
    return num_combinations(cents, 25)
def get_sh_input_config(cfg, data_source):
    """Get Sentinel Hub OGC configuration for given data source.
    :param cfg: Configuration
    :type cfg: dict
    :param data_source: Sentinel Hub's data source
    :type data_source: DataSource
    :return: the first matching entry of cfg['sh_inputs'], or None
    """
    matches = (entry for entry in cfg['sh_inputs']
               if entry['data_source'] == data_source.name)
    return next(matches, None)
def calc_jaccard_index(multiset_a, multiset_b):
    """Calculate jaccard's coefficient for two multisets mutliset_a
    and multiset_b.
    Jaccard index of two set is equal to:
    (no. of elements in intersection of two multisets)
    _____________________________________________
    (no. of elements in union of two multisets)
    Note: intersection and union of two multisets is similar to union-all and
    intersect-all operations in SQL.
    Args:
        multiset_a: list(int). First set.
        multiset_b: list(int). Second set.
    Returns:
        float. Jaccard index of two sets.
    """
    # Sort copies so the merge-style scans below are valid.
    multiset_a = sorted(multiset_a[:])
    multiset_b = sorted(multiset_b[:])
    # Multiset union: start from the larger list; elements of the smaller
    # list that find no match during the sorted merge scan add multiplicity.
    small_set = (
        multiset_a[:] if len(multiset_a) < len(multiset_b) else multiset_b[:])
    union_set = (
        multiset_b[:] if len(multiset_a) < len(multiset_b) else multiset_a[:])
    index = 0
    extra_elements = []
    for elem in small_set:
        # Skip union elements strictly smaller than elem.
        while index < len(union_set) and elem > union_set[index]:
            index += 1
        if index >= len(union_set) or elem < union_set[index]:
            # No remaining match: elem contributes extra multiplicity.
            extra_elements.append(elem)
        elif elem == union_set[index]:
            # Matched one occurrence; consume it.
            index += 1
    union_set.extend(extra_elements)
    if union_set == []:
        # Both inputs empty: define the index as 0 to avoid dividing by zero.
        return 0
    # Multiset intersection via the same sorted merge scan.
    index = 0
    intersection_set = []
    for elem in multiset_a:
        while index < len(multiset_b) and elem > multiset_b[index]:
            index += 1
        if index < len(multiset_b) and elem == multiset_b[index]:
            index += 1
            intersection_set.append(elem)
    coeff = float(len(intersection_set)) / len(union_set)
    return coeff
def session_ps_14bit(max_h, max_w):
    """Trim size to 14-bit limitation
    """
    # why 16383 instead of 16384 for 14-bit?
    # (16383 == 2**14 - 1, the largest value a 14-bit field can hold.)
    max_h = max(max_h, 24)   # enforce a minimum height of 24 rows
    max_w = max(max_w, 80)   # enforce a minimum width of 80 columns
    max_h = min(max_h, 204)  # 16383 // 80
    max_w = min(max_w, 682)  # 16383 // 24
    # Prefer sizes whose product hits the 14-bit cap exactly.
    if max_h >= 127 and max_w >= 129:
        return 127, 129  # 127*129=16383
    if max_h >= 129 and max_w >= 127:
        return 129, 127  # 129*127=16383
    if max_h * max_w <= 16383:
        return max_h, max_w
    # Otherwise keep the width and shrink the height until it fits.
    return 16383 // max_w, max_w
def is_valid(isbn):
    """
    Check whether the provided string is a valid ISBN-10.

    Non-digit separators (dashes, spaces) are ignored; the check digit may
    be 'X' (representing 10). The string must contain exactly 10 digits,
    and their weighted sum (weights 10..1) must be divisible by 11.

    :param isbn: candidate ISBN-10 string
    :return: True if valid, False otherwise
    """
    # Invalid in case the input string is empty / falsy.
    if not isbn:
        return False
    # Collect the numeric digits, skipping separators.
    digits = [int(ch) for ch in isbn if ch.isdigit()]
    # Check digit of an ISBN-10 may be 'X' (representing '10').
    if isbn[-1] == 'X':
        digits.append(10)
    # An ISBN-10 has exactly 10 digits; the original only rejected fewer
    # than 10 and silently accepted longer inputs.
    if len(digits) != 10:
        return False
    # Weighted checksum: first digit * 10, second * 9, ..., last * 1.
    checksum = sum(d * w for d, w in zip(digits, range(10, 0, -1)))
    return checksum % 11 == 0
def compute_prob(pattern_count, num_patterns, epsilon=1e-7):
    """
    Compute probability of a pattern; ``epsilon`` smooths both the count
    and the total, guarding against zero counts.
    """
    numerator = pattern_count + epsilon
    denominator = (num_patterns + epsilon) * (1 + epsilon)
    return numerator / denominator
import re
def time_to_ps(tstr):
    """
    Convert a time with unit to a float in pico seconds.
    Supported units: s, ms, us, ns, ps, fs.
    :param tstr: e.g. "2.5ns"
    :return: value in picoseconds as float
    :raises ValueError: if the string cannot be parsed
    """
    prefactors = ['', 'm', 'u', 'n', 'p', 'f']
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxWarning on newer Pythons).
    m = re.match(r'([\d.]+)([{}]?)s'.format(''.join(prefactors)), tstr)
    if m is None:
        raise ValueError('Could not parse time: {}'.format(tstr))
    val, prefactor = m.groups()
    # '' -> 1e12 (s -> ps), 'm' -> 1e9, ..., 'p' -> 1e0, 'f' -> 1e-3.
    decade = -3 * prefactors.index(prefactor) + 12
    return float(val) * 10**decade
def feasible(x, constr):
    """
    Checks the inequality constraints at x. If x is a feasible point,
    returns True. If it is infeasible, returns the index of the first
    constraint violated.

    WARNING: a violated first constraint returns index 0, which is falsy;
    callers must compare the result with ``is True`` rather than truth-test.

    :param x: candidate point
    :param constr: sequence of callables f_i with f_i(x) <= 0 when
        satisfied, or None for "no constraints"
    """
    # `is None` instead of `== None`: identity test, immune to odd __eq__.
    if constr is None:
        return True
    for i, constraint in enumerate(constr):
        if constraint(x) > 0:
            return i
    return True
def prop_exists(printer, ast):
    """Prints an exists property "E ..."."""
    inner = printer.ast_to_string(ast["prop"])
    return 'E' + inner
import re
def find_hashtags(text):
    """
    Extract and return all hashtags found in a string.

    A hashtag is a '#' followed by one or more characters that are not
    whitespace, '#' or '@'.
    :param text: a string variable
    :return: a list of matched hashtag strings
    """
    pattern = re.compile(r'[#][^\s#@]+')
    return pattern.findall(text)
def expand_optimization_args(group):
    """Expands the optimization related arguments with pytorch_translate
    specific arguments"""
    # --- Validation / early-stopping schedule -------------------------------
    group.add_argument(
        "--subepoch-validate-interval",
        default=0,
        type=int,
        metavar="N",
        help="Calculates loss over the validation set every N batch updates. "
        "Note that validation is done at the end of every epoch regardless. "
        "A value of <= 0 disables this.",
    )
    group.add_argument(
        "--stop-time-hr",
        default=-1.0,
        type=float,
        metavar="N",
        help="Stops training after N hours have elapsed. Use decimal values "
        "for sub-hourly granularity. A value of < 0 disables this.",
    )
    group.add_argument(
        "--stop-no-best-validate-loss",
        default=-1,
        type=int,
        metavar="N",
        help="Stops training after N validations have been run without "
        "achieving a better loss than before. Note that this is affected by "
        "--validation-interval in how frequently we run validation in the "
        "first place. A value of < 0 disables this.",
    )
    group.add_argument(
        "--stop-no-best-bleu-eval",
        default=-1,
        type=int,
        metavar="N",
        help="Stops training after N evals have been run without "
        "achieving a better BLEU score than before. Note that this is affected "
        "by --generate-bleu-eval-interval in how frequently we run BLEU eval "
        "in the first place. A value of < 0 disables this.",
    )
    # --- Learning-rate decay tied to BLEU plateaus --------------------------
    group.add_argument(
        "--shrink-lr-no-best-bleu-eval",
        default=5,
        type=int,
        metavar="N",
        help="Decay learning rate after N evals have been run without "
        "achieving a better BLEU score than before. This is to achieve "
        "decay lr within an epoch, independent of lr_scheduler. "
        "Note that this is affected by --generate-bleu-eval-interval in "
        "how frequently we run BLEU eval in the first place. "
        "A value of < 0 disables this.",
    )
    # --- Weight pruning -----------------------------------------------------
    group.add_argument(
        "--pruning-percentile",
        type=int,
        default=0,
        help="Proportion of weights to prune. A value <=0 disables pruning."
        " By default, prunes weights uniformly and ignores bias terms.",
    )
    group.add_argument(
        "--parameters-to-prune",
        default="all",
        help="Names of layers to prune. Layers are pruned if the argument is "
        "a substring of the layer name. Options are 'all', 'embed', 'lstm'. ",
    )
    # --- Sequence-level loss configuration ----------------------------------
    group.add_argument(
        "--loss-beam",
        type=int,
        default=0,
        help="Beam size to use for 'sequence_nll' loss and 'sequence_risk' "
        "loss. If zero, use --beam.",
    )
    return group
import requests
def get_response_status(derived_from_url):
    """ Get a response status code for derivedFrom value. Returns True if
    status code is 200, False otherwise (including HTTP errors)."""
    try:
        r = requests.get(derived_from_url)
        r.raise_for_status()
        # Explicit boolean result: the original fell off the end and
        # returned None for non-200 success codes (e.g. 204).
        return r.status_code == 200
    except requests.exceptions.HTTPError:
        return False
def extract_ROI(image_path, image, ROI):
    """Extract the region of interest out of an image
    :param image_path: path to the image file (used in error messages)
    :type image_path: str
    :param image: the image matrix, shape (height, width, channels)
    :type image: numpy.ndarray
    :param ROI: (x, y, w, h) rectangle, or None for the whole image
    :returns: numpy.ndarray -- the region of interest, shape (h, w, channels)
    """
    if ROI is None:
        return image
    if len(ROI) != 4:
        raise TypeError("ROI needs to be of length 4")
    x, y, w, h = ROI
    height, width, _ = image.shape
    if x < 0 or y < 0 or x + w > width or y + h > height:
        raise ValueError("Invalid dimensions for ROI for image: %s"
                         % image_path)
    # Rows are indexed by y/h (first axis), columns by x/w (second axis).
    # The original sliced image[x:x+w, y:y+h], which swapped the axes and
    # contradicted the bounds check above.
    return image[y:y + h, x:x + w]
from typing import Mapping
def dict_merge(dct, merge_dct, add_keys=True):
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    This version will return a copy of the dictionary and leave the original
    arguments untouched.
    The optional argument ``add_keys``, determines whether keys which are
    present in ``merge_dict`` but not ``dct`` should be included in the
    new dict.
    Args:
        dct (dict) onto which the merge is executed
        merge_dct (dict): dct merged into dct
        add_keys (bool): whether to add new keys
    Returns:
        dict: updated dict
    """
    result = dct.copy()
    if not add_keys:
        # Restrict the merge to keys already present in dct.
        merge_dct = {key: merge_dct[key] for key in set(result) & set(merge_dct)}
    for key, value in merge_dct.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, Mapping):
            # Both sides are mappings: merge them recursively.
            result[key] = dict_merge(result[key], value, add_keys=add_keys)
        else:
            result[key] = value
    return result
def create_tags(tags: list) -> list:
    """Prepares tags for a new upload. Keeps as many old tags as possible while adding a "nightcore" tag."""
    to_add = 'nightcore'
    # The total number of characters in YouTube video tags can't exceed 400;
    # reserve room for the tag we are about to add.
    budget = 400 - len(to_add)
    kept = []
    running_length = 0
    for tag in tags:
        running_length += len(tag)
        if running_length >= budget:
            # This tag would blow the budget; stop keeping originals.
            break
        kept.append(tag)
    kept.append(to_add)
    return kept
def parse_conf_intervals(ci_fh):
    """
    Parse the StAR conf_intervals.txt file, each row a pair of methods
    with AUC difference (this time WITH sign, so we know which is better
    and which worse) and confidence interval
    Parameters:
       ci_fh - open filehandle to read conf_intervals.txt from
    Return value:
       dict { (method1,method2) : (auc_difference, cilower, ciupper) }
       mapping pair of methods to difference in AUC (method1 - method2),
       and lower and upper confidence interval value, all as floats
    """
    ci_dict = {}
    # Skip the header line (if the file is empty this is a no-op).
    next(ci_fh, None)
    for line in ci_fh:
        sline = line.split('\t')
        (method1, method2) = sline[0].split('/')
        method1 = method1.strip('"')
        method2 = method2.strip('"')
        deltaAUC = float(sline[1])
        cipair = sline[2]  # ( -0.0642863 , -0.0410837 )
        # Convert the bounds to floats; the original returned raw strings,
        # contradicting its own docstring.
        cilower = float(cipair.split(' ')[1])
        ciupper = float(cipair.split(' ')[3])
        ci_dict[(method1, method2)] = (deltaAUC, cilower, ciupper)
    return ci_dict
def delta_tau_i(kappa_i, p_1, p_2, g):
    """
    Contribution to optical depth from layer i, Malik et al. (2017) Equation 19

    :param kappa_i: opacity of layer i
    :param p_1: pressure at the lower boundary of the layer
    :param p_2: pressure at the upper boundary of the layer
    :param g: gravitational acceleration
    :return: optical depth contribution of layer i
    """
    # Hydrostatic column mass between p_1 and p_2 is (p_1 - p_2)/g;
    # multiplying by the opacity gives the layer's optical depth.
    return (p_1 - p_2) / g * kappa_i
from typing import OrderedDict
def walkdict(dict, match):
    """
    Finds a key in a dict or nested dict and returns the value associated with it
    :param dict: dict or nested dict (NOTE: this parameter name shadows the
        builtin ``dict``; kept for backward compatibility with keyword callers)
    :param match: key to look for
    :return: value associated with the key, or None when not found
    """
    from collections.abc import Mapping
    for key, value in dict.items():
        if key == match:
            return value
        # Recurse into any mapping, not only typing.OrderedDict: the old
        # isinstance(v, OrderedDict) check silently skipped plain dicts
        # (OrderedDict is a dict subclass, so this is strictly broader).
        if isinstance(value, Mapping):
            found = walkdict(value, match)
            if found is not None:
                return found
    return None
def instance_to_queryset_string(instance):
    """ Return a django queryset representation of an instance """
    # Last dotted component of "<class 'pkg.module.Name'>" (quotes stripped).
    type_name = str(type(instance)).strip("<>'").split('.')[-1]
    return f"<{type_name}: {instance}>"
def followers_count(user):
    """
    Returns user followers count
    :param user: An User instance
    """
    # NOTE(review): `is_anonymous` is invoked as a method here; on modern
    # Django it is a property -- confirm against the framework version in use.
    if not user or user.is_anonymous():
        return 0
    # presumably user.followers() returns an int count -- verify with callers
    return user.followers()
def level_message(sequence, level):
    """Return simon level message which contains the simon sequence"""
    # ANSI codes: bright magenta header, underlined sequence line.
    joined = " ".join(map(str, sequence))
    return "\033[1;95mSequence {}:\n\n\033[4m{}".format(level, joined)
def sum_offset(data: list[int]) -> int:
    """Count the adjacent pairs of ``data`` where the second element is
    larger than the first.
    Each pair increases:
    >>> sum_offset([1, 2, 3, 4, 5])
    4
    No pair increases:
    >>> sum_offset([5, 4, 3, 2, 1])
    0
    Mix of both:
    >>> sum_offset([5, 2, 6, 1, 83])
    2
    """
    increases = 0
    for previous, current in zip(data, data[1:]):
        if current > previous:
            increases += 1
    return increases
import re
def zero_digits(s):
    #{{{
    """
    Replace every digit in a string by a zero.
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since 3.6, SyntaxWarning on newer Pythons).
    return re.sub(r'\d', '0', s)
def school_year(date, as_tuple=False):
    """
    Return the school year of 'date'. Example:
      * as_tuple = False: "2013 — 2014"
      * as_tuple = True: (2013, 2014)
    """
    # School years start in August: before August, the date belongs to the
    # year that began the previous calendar year.
    start_year = date.year - 1 if date.month < 8 else date.year
    if as_tuple:
        return (start_year, start_year + 1)
    return "%d — %d" % (start_year, start_year + 1)
import contextlib
import wave
def wav_duration(filename):
    """
    get wav file duration in seconds

    :param filename: path to a WAV file
    :return: duration in seconds as a float
    """
    # contextlib.closing guarantees the wave reader is closed even on error.
    with contextlib.closing(wave.open(filename,'r')) as f:
        frames = f.getnframes()   # total number of audio frames
        rate = f.getframerate()   # frames per second
        duration = frames / float(rate)
        #print(duration)
        return duration
def eq_strict(a, b):
    """Returns True if both values have the same type and are equal."""
    # Short-circuits to False on a type mismatch before comparing values.
    return type(a) is type(b) and a == b
def same_text(s1, s2):
    """True if both strings are the same, ignoring case."""
    # casefold() handles aggressive case-insensitive matching (PEP-recommended
    # over lower() for comparisons).
    left = s1.casefold()
    right = s2.casefold()
    return left == right
import pathlib
def read_html(filename: str) -> str:
    """ Read the html file and return its contents. """
    # Resolves relative to this module's directory, not the CWD.
    # NOTE(review): read_text() uses the locale's default encoding here;
    # consider passing encoding="utf-8" -- confirm with callers first.
    return (pathlib.Path(__file__).resolve().parent / filename).read_text()
def formatString(oldStr):
    """
    Format string into separate lines
    *NOTE*
    The newStr already has new lines and the developer should use the numLines return
    to align any other strings to the newStr
    Parameters
    ----------
    oldStr: string
        Old string to be Formatted
    Returns
    ----------
    numLines: int
        Number of lines the new string will take up
    newStr: string
        New string with new lines
    """
    LINE_LENGTH = 32
    strList = oldStr.split(" ")
    numLines = 1
    newStr = ""
    curLen = 0
    # Greedy word wrap; the +1 below accounts for the trailing space that is
    # appended after every word.
    for word in strList:
        if (len(word) + curLen) > LINE_LENGTH:
            # Word doesn't fit on the current line -> start a new line.
            # NOTE(review): a single word longer than LINE_LENGTH still goes
            # onto its own line and will exceed the limit.
            numLines += 1
            curLen = len(word) + 1
            newStr += ("\n" + word + " ")
        else:
            curLen += (len(word) + 1)
            newStr += (word + " ")
    return numLines, newStr
def sum_num(start, stop, step=1):
    """Return the sum of the arithmetic sequence start, start+step, ...
    up to and including the largest term that does not exceed ``stop``.

    Uses the closed-form pair sum, so it works efficiently on large ranges.
    NOTE: unlike ``sum(range(start, stop, step))`` the final term can be
    included (the range here is inclusive of its last in-range term).
    """
    num_terms = (stop - start) // step + 1
    # The original paired `start` with `stop` directly, which is wrong when
    # (stop - start) is not a multiple of step -- the actual last term is:
    last = start + (num_terms - 1) * step
    return (start + last) * num_terms / 2
def get_column_index(column):
    """Get column index from name, e.g. A -> 1, D -> 4, AC -> 29.
    Reverse of `get_column_letter()`
    """
    # Bijective base-26: scan left-to-right, 'a' == 1 ... 'z' == 26.
    total = 0
    for char in str(column).lower():
        total = total * 26 + (ord(char) - 96)
    return total
from pathlib import Path
def completely_pruned(wkspace):
    """Determine which systematics are completely pruned.
    Parameters
    ----------
    wkspace : str or os.PathLike
        Path of TRExFitter workspace.
    Returns
    -------
    list(str)
        Names of all completely pruned systematics.
    """
    pairs = []
    with (Path(wkspace) / "PruningText.txt").open("r") as f:
        for line in f:
            if not line.startswith(" --->>"):
                continue
            # NOTE(review): this 2-tuple unpack assumes the text after the
            # marker splits into exactly two space-separated fields, yet the
            # statuses compared below ("is not present", "is kept", ...)
            # themselves contain spaces -- confirm the real file format;
            # split(" ", 1) may be the intent here.
            sys, status = line.strip()[7:].split(" ")
            if status == "is not present":
                continue
            pairs.append((sys, status))
    # Count, per systematic, how many regions it survived pruning in.
    unique = sorted(set([p[0] for p in pairs]))
    tests = {u: 0 for u in unique}
    for sys, status in pairs:
        k = 0
        if status == "is kept":
            k = 1
        elif status == "is shape only":
            k = 1
        elif status == "is norm only":
            k = 1
        tests[sys] += k
    # Completely pruned = surviving in zero regions.
    return [k for k, v in tests.items() if v == 0]
def is_word(word):
    """Check whether ``word`` consists solely of lowercase ASCII letters.

    Empty strings vacuously return True, matching the original behavior.
    """
    # Set membership is O(1) per character; the original scanned a 26-char
    # string for every letter.
    letters = set('qwertyuiopasdfghjklzxcvbnm')
    return all(ch in letters for ch in word)
def unconvolve_sequences(window):
    """
    :param window: a numpy array of sequences of ids that was windowed
    :return: the middle column
    """
    if window.ndim == 1:
        # Already a plain vector; nothing to extract.
        return window
    return window[:, window.shape[1] // 2]
def convert_string_to_bool(string):
    """Converts string to bool.
    Args:
        string: str, string to convert.
    Returns:
        False only for "false" in any letter case; True for everything else.
    """
    return string.lower() != "false"
def get_description(arg):
    """Generates a proper description for the given argument"""
    # `arg` is assumed to expose .can_be_inferred, .is_flag, .type,
    # .is_vector and .is_generic -- TODO confirm against the schema class.
    desc = []
    otherwise = False
    if arg.can_be_inferred:
        desc.append('If left unspecified, it will be inferred automatically.')
        otherwise = True
    elif arg.is_flag:
        desc.append('This argument can be omitted.')
        otherwise = True
    if arg.type in {'InputPeer', 'InputUser', 'InputChannel'}:
        desc.append(
            'Anything entity-like will work if the library can find its '
            '<code>Input</code> version (e.g., usernames, <code>Peer</code>, '
            '<code>User</code> or <code>Channel</code> objects, etc.).'
        )
    if arg.is_vector:
        if arg.is_generic:
            desc.append('A list of other Requests must be supplied.')
        else:
            desc.append('A list must be supplied.')
    elif arg.is_generic:
        desc.append('A different Request must be supplied for this argument.')
    else:
        otherwise = False  # Always reset to false if no other text is added
    if otherwise:
        # Join the first sentence to the rest with an "Otherwise," connector
        # and lowercase the start of the following sentence.
        desc.insert(1, 'Otherwise,')
        desc[-1] = desc[-1][:1].lower() + desc[-1][1:]
    # Wrap the word "list" in a tooltip span for the generated HTML docs.
    return ' '.join(desc).replace(
        'list',
        '<span class="tooltip" title="Any iterable that supports len() '
        'will work too">list</span>'
    )
def one_in(setA, setB):
    """Check the presence of an element of setA in setB
    """
    # any() short-circuits at the first hit, like the original loop.
    return any(element in setB for element in setA)
def isPrime(n):
    """
    Checks if a natural number is prime
    :param n: the number (a positive integer) being checked
    :return: boolean

    Fixes over the original:
      * 2 was reported non-prime (the even-number shortcut fired first)
      * perfect squares of primes (9, 25, 49, ...) were reported prime
        because the divisor range excluded floor(sqrt(n))
      * 1 was reported prime
    """
    if n < 2:
        return False   # 0 and 1 are not prime
    if n == 2:
        return True    # the only even prime
    if n % 2 == 0:
        return False
    # Test odd divisors up to and including floor(sqrt(n)).
    cap = int(n ** 0.5)
    for i in range(3, cap + 1, 2):
        if n % i == 0:
            return False
    return True
from typing import Tuple
from typing import Dict
from typing import Any
def postprocess_fbound(
    smfb_0: float, smfb_1: float
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Postprocess frequency bounds
    Args:
        smfb_0 (float): Lower bound
        smfb_1 (float): Upper bound
    Returns:
        dict, dict: Postprocessed settings and parameters as dictionaries"""
    if smfb_0 > smfb_1:
        # Keep the bounds ordered low-to-high.
        smfb_0, smfb_1 = smfb_1, smfb_0
    elif smfb_0 == smfb_1:
        # Degenerate range: fall back to the full audible band.
        smfb_0, smfb_1 = 20, 16000
    in_kw = {"smfb_0": smfb_0, "smfb_1": smfb_1}
    out_kw = {"sinusoidal_model__frequency_bounds": (smfb_0, smfb_1)}
    return in_kw, out_kw
def str_to_tuple(astr):
    """Helper function for creating a vector (tuple) from the given string."""
    # 'a' maps to 0, 'b' to 1, and so on.
    base = ord("a")
    return tuple(ord(ch) - base for ch in astr)
import json
def json_pretty_print(json_dict):
    """pretty print json data"""
    # Two-space indentation with keys in sorted order.
    return json.dumps(json_dict, indent=2, sort_keys=True)
def to_str(s, integer=True):
    """Convert a column value to a string
    usage: new_var = data.apply(lambda f : to_str(f['COLNAME']) , axis = 1)
    integer: boolean for whether to convert to int first
    Values that cannot be converted are returned unchanged.
    """
    try:
        if integer:
            s1 = str(int(s))
        else:
            s1 = str(s)
        return s1
    except (TypeError, ValueError):
        # int() raises ValueError for non-numeric strings / NaN, but
        # TypeError for None -- the original only caught ValueError and
        # crashed on None despite its "return as-is" contract.
        return s
def pre_compute(leaf, data, scope=None, **kwargs):
    """ Transform data prior to calling ``compute`` """
    # Default hook: identity transform. Backends override/dispatch on this
    # to massage `data` for a given expression `leaf`; `scope` and extra
    # keyword arguments are accepted for interface compatibility and ignored.
    return data
def get_number_aliens_x(ai_settings, alien_width):
    """Determine the number of aliens that fit in a row."""
    # Leave one alien-width margin on each side of the screen, then allot
    # two alien widths (alien + gap) per alien.
    usable_width = ai_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def getCenter(box):
    """
    Return the integer center point (x, y) of a bounding box given as
    (x1, y1, x2, y2).
    """
    center_x = int((box[0] + box[2]) / 2)
    center_y = int((box[1] + box[3]) / 2)
    return (center_x, center_y)
def media_final_aprovado_reprovado(prova, exercicio, projeto):
    """Weighted final average: exam (prova) has weight 4, exercise
    (exercicio) weight 1, project (projeto) weight 3. Returns True when
    the weighted average is at least 7 (approved)."""
    media = (prova * 4 + exercicio * 1 + projeto * 3) / 8
    return media >= 7
import json
def dict_to_bytes(data: dict) -> bytes:
    """
    Serialize a dict to UTF-8-encoded JSON bytes.

    Parameters
    ----------
    data : dict
        Data to convert.

    Returns
    -------
    bytes
        The converted data.
    """
    return bytes(json.dumps(data), 'utf-8')
from pathlib import Path
from typing import Tuple
from typing import List
def python_paths_find(python_package_path: Path, tracing: str = "") -> Tuple[Path, ...]:
    """Return the sorted ``*.py`` files in a package directory,
    excluding ``__init__.py``. Prints entry/exit traces when *tracing*
    is non-empty."""
    if tracing:
        print(f"{tracing}=>python_paths_find({python_package_path})")
    found: List[Path] = sorted(
        candidate
        for candidate in python_package_path.glob("*.py")
        if candidate.name != "__init__.py"
    )
    if tracing:
        print(f"{tracing}<=python_paths_find({python_package_path})=>{found}")
    return tuple(found)
def reformat(formula):
    """Pad '~', '(' and ')' with spaces, then split the formula into tokens."""
    padded = "".join(f" {ch} " if ch in "~()" else ch for ch in formula)
    return padded.split()
from dateutil import tz
def to_local_time_zone(date_input):
"""Returns datetime object transformed to the local time zone.
Args:
date_input (datetime): value to be transformed.
"""
if date_input.tzinfo is None:
return date_input
else:
return date_input.astimezone(tz.tzlocal()).replace(tzinfo=None) | 857f39f45c5cf58e67cb0de79247f32dffe563d3 | 693,451 |
def get_int_argument(args, argname, default=0):
    """
    Helper function to extract an integer argument.
    Will raise ValueError if the argument exists but is not an integer.
    Will return the default if the argument is not found.
    """
    raw = args.get(argname)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise ValueError('Error while parsing argument %s' % argname)
import math
def _GetPercentile(sortedlist, percent):
"""Returns a desired percentile value of a sorted list of numbers.
E.g., if a list of request latencies is
[1, 4, 7, 14, 34, 89, 100, 123, 149, 345], and percent is 0.9, the result
is 149. If percent is 0.5 (median), result is 34.
Args:
sortedlist: A sorted list of integers, longs or floats.
percent: A fraction between 0 and 1 that indicates desired
percentile value. E.g., 0.9 means 90th percentile is desired.
Returns:
None if list is empty. Else, the desired percentile value.
"""
if not sortedlist:
return None
k = int(math.ceil(len(sortedlist) * percent)) - 1
if k < 0:
k = 0
return sortedlist[k] | c3d726861feb8a28493e9616fad071394eb14e34 | 693,455 |
def path_vars_in(d):
    """
    Extract all (and only) the path vars in a dictionary.

    :param d: a .paths.json data structure
    :return: all path var definitions without any special entries like '__ENV'
    """
    return [(key, value) for key, value in d.items() if key != '__ENV']
import math
def josephus_problem_2(n: int) -> int:
    """Solve the Josephus problem for step size 2.

    Uses the closed form J(2**m + l, 2) = 2*l + 1, where 2**m is the
    largest power of two not exceeding n.
    """
    # n.bit_length() - 1 == floor(log2(n)) exactly for any positive int,
    # avoiding the float rounding that int(math.log2(n)) can suffer for
    # very large n.
    largest_pow2 = 1 << (n.bit_length() - 1)
    return 2 * (n - largest_pow2) + 1
import string
import secrets
def random_string(
    n: int = 8,
    *, _ALPHABET=''.join([string.ascii_lowercase, string.digits])
) -> str:
    """Generate a cryptographically random string of lowercase ascii
    letters and digits.

    Parameters
    ----------
    n: int, optional
        The number of characters in the output string. Default=8
    """
    return ''.join(secrets.choice(_ALPHABET) for _ in range(n))
def format_production_set(productions):
    """Renders a set of productions in a human-readable format."""
    lines = [str(production) for production in sorted(productions)]
    return "\n".join(lines)
def generateJobName(key):
    """
    Transcribe job names cannot contain spaces. This takes an S3 object
    key, drops everything up to and including the first '/', replaces
    spaces with "-" characters and returns the result as the job name.
    """
    name = key.partition('/')[2] if "/" in key else key
    return name.replace(" ", "-")
def exceeds_max_lines(filepath: str, max_lines: int) -> bool:
    """
    Return True if the file at ``filepath`` contains more than
    ``max_lines`` lines. Stops reading as soon as the limit is crossed.
    """
    with open(filepath) as fobj:
        seen = 0
        for _ in fobj:
            seen += 1
            if seen > max_lines:
                return True
    return False
import functools
import asyncio
def async_test(wrapped):
    """
    Decorator that runs an async test function synchronously via
    ``asyncio.run``.

    Example:

        >>> @async_test
        ... async def lemon_wins():
        ...     assert True
    """
    @functools.wraps(wrapped)
    def runner(*args, **kwargs):
        coro = wrapped(*args, **kwargs)
        return asyncio.run(coro)
    return runner
import functools
import time
def compute_time_ms(func):
    """
    Decorator that prints how long the wrapped call took, in
    milliseconds, and returns the wrapped function's result.
    """
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        print(f"Time taken: {(time.time() - started) * 1000}")
        return result
    return timed
def generate_specificity_at_threshold(threshold, weighted=False):
    """
    Returns a function that computes specificity (true-negative rate)
    at the provided threshold.
    If weighted = True, the returned function takes a third argument
    for the sample weights.
    """
    def specificity(labels, pred_probs):
        negatives = labels == 0
        if negatives.sum() > 0:
            correct = negatives & (labels == (pred_probs >= threshold))
            return correct.sum() / negatives.sum()
        return 0.0

    def weighted_specificity(labels, pred_probs, sample_weight):
        negatives = labels == 0
        if negatives.sum() > 0:
            correct = negatives & (labels == (pred_probs >= threshold))
            # NOTE: the guard uses the unweighted negative count, while
            # the denominator is the weighted one (as in the original).
            return (correct * sample_weight).sum() / (negatives * sample_weight).sum()
        return 0.0

    return weighted_specificity if weighted else specificity
def is_builtin_entity(oid):
    """Return True if the OID hex string denotes a built-in entity,
    i.e. the two top bits of the 0xC0 mask are both set."""
    return (int(oid, 16) & 0xC0) == 0xC0
def get_AZN(nco_id):
    """Returns mass number :math:`A`, charge :math:`Z` and neutron
    number :math:`N` of ``nco_id``.

    Args:
      nco_id (int): corsika id of nucleus/mass group

    Returns:
      (int, int, int): (A, Z, N) tuple; (0, 0, 0) for ids below 100
    """
    if nco_id < 100:
        return 0, 0, 0
    # Encoding is A*100 + Z for nuclei.
    Z = nco_id % 100
    A = (nco_id - Z) // 100
    return A, Z, A - Z
def stopwatch_format(ticks):
    """
    Format a count of tenths-of-seconds as "M:SS.T".
    """
    tenths = ticks % 10
    total_seconds = ticks // 10
    seconds = total_seconds % 10
    tens_seconds = (total_seconds // 10) % 6
    minutes = ticks // 600
    return f"{minutes}:{tens_seconds}{seconds}.{tenths}"
def _approximate_bias(b, name):
"""
Find a reasonable match for the given name when we have existing biases in gazetteer entry.
Otherwise if the name is just long enough it should be rather unique and return a high bias ~ 0.3-0.5
If a name varies in length by a third, we'll approximate the name bias to be similar.
:param b: dict of name:bias
:param name: normalize name
:return:
"""
if name in b:
return b.get(name)
nmlen = len(name)
diff = int(nmlen / 3)
for n in b:
nlen = len(n)
if abs(nmlen - nlen) < diff:
return b.get(n)
if nmlen >= 20:
return 0.40
return 0.05 | 00bea5b36b1050f74a4763795dae6626d2db8f44 | 693,490 |
def get_e_rtd_default(hw_type):
    """Default efficiency of the given water heater.

    Args:
        hw_type (str): type of water heater / hot-water heating unit

    Returns:
        float: efficiency of the water heater

    Raises:
        ValueError: for an unknown ``hw_type``
    """
    efficiency_by_type = {
        'ガス潜熱回収型給湯機': 0.836,
        'ガス潜熱回収型給湯温水暖房機': 0.836,
        'ガス従来型給湯機': 0.704,
        'ガス従来型給湯温水暖房機': 0.704,
    }
    if hw_type not in efficiency_by_type:
        raise ValueError(hw_type)
    return efficiency_by_type[hw_type]
def get_dict_value(dict_, key):
    """
    Return the value for key in the (JSON) dictionary, or None when the
    key is absent.
    """
    # dict.get performs the membership test and lookup in one step.
    return dict_.get(key)
def nearest_power_of_two(x):
    """
    Return a number which is nearest to `x` and is an integral power of
    two.

    Parameters
    ----------
    x : int, float

    Returns
    -------
    x_nearest : int
        Number closest to `x` that is an integral power of two.
    """
    x = int(x)
    if x == 0:
        lower = upper = 1
    else:
        lower = 2 ** (x - 2).bit_length()
        upper = 2 ** (x - 1).bit_length()
    # Ties go to the upper candidate.
    if (x - lower) < (upper - x):
        return lower
    return upper
import re
def remove_escape_sequences(string):
    """
    Collapse every contiguous run of whitespace/control characters
    ("\\r\\n\\t\\v\\b\\f\\a ") into a single space.
    """
    return re.sub(r'[\r\n\t\v\b\f\a ]+', ' ', string)
def dnac_get_modules(dnac_session, dnac_host, dnac_headers, device_id):
    """Fetch the modules of a network device from the DNA Center API
    and return the 'response' payload. Raises for HTTP errors."""
    url = (
        'https://%s/dna/intent/api/v1/' % dnac_host
        + 'network-device/module?deviceId=%s' % device_id
    )
    # verify=False: DNA Center appliances commonly use self-signed certs.
    reply = dnac_session.get(url, verify=False, headers=dnac_headers)
    reply.raise_for_status()
    return reply.json()['response']
import re
def extract_patents(line, default_country=None):
    """
    Extract a list of patent number strings.

    Supports standard patent publication numbers like WO2012066519,
    tolerating spaces, punctuation and slashes, and keeps application
    numbers like PCT/IB2011055210. If an iso2 country code is provided,
    a bare number can also be matched against it.

    :param line: The free text line
    :param default_country: The default country
    :return: list of normalized patent numbers
    """
    prefix = "" if default_country is None else " " + default_country
    haystack = (prefix + " " + line + " ").lower()
    pattern = r"(?:[^a-z])((?:pct/?)?[a-z]{2})([0-9 ,/]{6,})"
    found = []
    for country, raw_number in re.findall(pattern, haystack):
        number = re.sub("[ ,-/]", "", raw_number)
        if len(number) >= 5:
            found.append(country.upper() + number)
    return found
def floyd_warshall(G):
    """Find all shortest paths in a dense integer-weighted directed graph.

    An implementation of the Floyd-Warshall algorithm; complexity is
    cubic, O(n**3).

    Arguments:
        G: Type List[List[int]]. A dense directed graph as a square
           matrix; G[i][j] is the cost to go from node i to node j.
    Returns:
        result[0]: List[List[int]]. Shortest path lengths; result[0][u][v]
            is the shortest distance from node u to node v.
        result[1]: List[List[int]]. Path successors; result[1][u][v] is
            the node immediately after u on the shortest path u -> v.
    Raises:
        ValueError: If a negative cycle exists in G.
    References:
        Zwick, Uri. "All pairs shortest paths using bridging sets and
        rectangular matrix multiplication." JACM 49.3 (2002): 289-317.
    """
    n = len(G)
    dist = [list(row) for row in G]
    succ = [list(range(n)) for _ in range(n)]
    # Relax every pair through every possible intermediate node.
    for mid in range(n):
        for src in range(n):
            for dst in range(n):
                candidate = dist[src][mid] + dist[mid][dst]
                if candidate < dist[src][dst]:
                    dist[src][dst] = candidate
                    succ[src][dst] = succ[src][mid]
    # A negative diagonal entry reveals a negative-weight cycle.
    if any(dist[v][v] < 0 for v in range(n)):
        raise ValueError("Graph contains a negative-weight cycle")
    return (dist, succ)
def unf_bo_below_pb_m3m3(rho_oil_st_kgm3=820, rs_m3m3=100, rho_oil_insitu_kgm3=700, gamma_gas=0.8):
    """
    Oil formation volume factor according to the McCain correlation for
    pressure below the bubble point pressure.

    :param rho_oil_st_kgm3: density of stock-tank oil, kgm3
    :param rs_m3m3: solution gas-oil ratio, m3m3
    :param rho_oil_insitu_kgm3: oil density at reservoir conditions, kgm3
    :param gamma_gas: specific gas density (by air)
    :return: formation volume factor bo, m3m3

    ref1 book Mccain_w_d_spivey_j_p_lenn_c_p_petroleum_reservoir_fluid,
    third edition, 2011
    """
    # Coefficients converted to SI units -- see the notebook description.
    dissolved_gas = 1.22044505587208 * rs_m3m3 * gamma_gas
    return (rho_oil_st_kgm3 + dissolved_gas) / rho_oil_insitu_kgm3
from typing import Callable
def euler_step(f: Callable[[float, float], float], t_k: float, y_k: float, h: float) -> float:
    """
    Perform a single forward-Euler step: y_{k+1} = y_k + h * f(t_k, y_k).

    Parameters:
        f (function) - The derivative whose integral is approximated
        t_k (float)
        y_k (float)
        h (float) - Step size
    """
    slope = f(t_k, y_k)
    return y_k + slope * h
def flatten(alst):
    """Recursively flatten arbitrarily nested iterables into one list.

    Strings and bytes are treated as atoms: they are iterable, but
    recursing into them character-by-character would never terminate
    (each character is itself an iterable string).

    >>> flatten([0, [1, (2, 3), [4, [5, [6, 7]]]], 8])
    [0, 1, 2, 3, 4, 5, 6, 7, 8]
    """
    def _recur(blst):
        for elem in blst:
            if hasattr(elem, "__iter__") and not isinstance(elem, (str, bytes)):
                yield from _recur(elem)
            else:
                yield elem
    return list(_recur(alst))
def rivers_with_station(stations):
    """
    Given a list of stations, return the set of all the river names
    contained within these stations.
    """
    # Set comprehension avoids building an intermediate list.
    return {station.river for station in stations}
def parse_darkhorse(input_f, output_fp, low_lpi=0.0, high_lpi=0.6):
    """ Parse output of DarkHorse (smry file).

    Parameters
    ----------
    input_f: file object
        file descriptor for DarkHorse smry output
    output_fp: str
        Filepath to output best hit genome IDs (skipped if falsy)
    low_lpi: float
        lower LPI (lineage probability index) score bound (exclusive)
    high_lpi: float
        upper LPI score bound (exclusive)

    Returns
    -------
    hgts: string
        one putative HGT-derived gene per line; columns: query_id,
        besthit_id, tax_id, species, lineage, pct_id, pct_coverage,
        norm_LPI
    """
    best_hit_ids = set()
    hgts = []
    next(input_f)  # skip the header line
    for line in input_f:
        fields = line.strip('\r\n').split('\t')
        best_hit_ids.add(fields[3])
        lpi = float(fields[5])
        if low_lpi < lpi < high_lpi:
            hgts.append('\t'.join((fields[0], fields[3], fields[12],
                                   fields[13], fields[14], fields[6],
                                   fields[9], fields[4])))
    if output_fp:
        with open(output_fp, 'w') as output_f:
            output_f.write('\n'.join(best_hit_ids))
    return '\n'.join(hgts)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.