content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def run(result):
    """Trivial check helper: ignore *result* and always report success."""
    # The argument is accepted for interface compatibility but is unused.
    return True
def get_probe_hit(tree, gene_info, r, is_gtf=False):
    """
    Given a dict tree (from read_probe_bed) and a GMAP SAM record
    Go through each exon and find probes that hit it

    :param tree: dict mapping reference name -> interval tree exposing
        ``find(start, end)``; hits unpack as (probe_index, strand, interval)
    :param gene_info: mapping from probe index -> gene identifier
    :param r: alignment record; uses .sID/.segments (SAM) or, when
        ``is_gtf``, .chr/.ref_exons
    :param is_gtf: if True, read reference name and exons from the
        GTF-style attributes

    Return: (number of probes hit), (total number of bases overlapping with probes), (genes seen)
    """
    probes_seen = set()
    genes_seen = set()
    base_hit = 0
    # GTF records carry the reference name / exons under different attributes.
    if is_gtf: r.sID, r.segments = r.chr, r.ref_exons
    if r.sID not in tree: return 0, 0, set()
    for e in r.segments:
        hits = tree[r.sID].find(e.start, e.end)
        if len(hits) == 0: continue
        for i,strand,intl in hits:
            # strand None means the probe is unstranded; otherwise it must match.
            if (strand is None) or (strand==r.strand):
                probes_seen.add(i)
                genes_seen.add(gene_info[i])
                # Length of the overlap between exon e and probe interval intl.
                base_hit += min(e.end,intl.end)-max(e.start,intl.start)
    return len(probes_seen), base_hit, genes_seen
def firstof(*args, default=None):
    """Return the first truthy value, or *default* when none is found.

    With a single positional argument, that argument is treated as an
    iterable of candidates; otherwise the arguments themselves are scanned.
    """
    candidates = args[0] if len(args) == 1 else args
    for candidate in candidates:
        if candidate:
            return candidate
    return default
import functools
def _validate_satellite(func):
    """Decorator verifying that the wrapped method's first argument names a
    satellite present in ``self.ds['satellite']``.

    Raises IndexError listing the valid satellites otherwise.
    """
    @functools.wraps(func)
    def wrapper(self, satellite, *args, **kwargs):
        # The satellite must be the wrapped method's first argument.
        if satellite in self.ds['satellite']:
            return func(self, satellite, *args, **kwargs)
        raise IndexError("Satellite not in dataset, must be one of: "
                         "{}".format(self.ds['satellite'].values))
    return wrapper
def count_lines(filename):
    """Count the number of lines in a source file.

    Return a pair ``(n0, n1)`` where ``n0`` is the total number of lines
    and ``n1`` the number of non-empty (non-whitespace) lines.
    """
    with open(filename) as handle:
        all_lines = handle.readlines()
    total = len(all_lines)
    non_empty = sum(1 for ln in all_lines if ln.strip())
    return (total, non_empty)
def concat(str_1: str, str_2: str) -> str:
    """Concatenate two strings.

    Args:
        str_1 (str): first string
        str_2 (str): second string

    Raises:
        TypeError: when either argument is not a ``str``

    Returns:
        str: ``str_1 + str_2``
    """
    if isinstance(str_1, str) and isinstance(str_2, str):
        return str_1 + str_2
    raise TypeError("錯誤型態")
def right_rotate(n: int, b: int) -> int:
    """Rotate the low 32 bits of *n* right by *b* positions.

    :param n: the value to rotate (only its low 32 bits are kept)
    :param b: rotation amount in bits
    :return: the rotated 32-bit value
    """
    low = n & 0xffffffff
    rotated = (n >> b) | (low << (32 - b))
    return rotated & 0xffffffff
def unstack_batch(tensor, B):
    """Reverses stack_batch: split the leading axis into (B, N) groups."""
    per_batch = tensor.shape[0] // B
    new_shape = (B, per_batch) + tuple(tensor.shape[1:])
    return tensor.reshape(*new_shape)
import datetime as dt
def check_date(indate):
    """Coerce several date representations into a ``datetime.date``.

    The following formats are supported:
      1. a ``date`` object (returned unchanged)
      2. a ``datetime`` object (its ``.date()`` is returned)
      3. a string ``YYYYMMDD``
      4. a string ``YYYYDDD`` (day-of-year)

    Raises ``KeyError`` for anything else (exception type kept for
    backward compatibility with existing callers).
    """
    # datetime is a subclass of date, so it must be tested first.
    if isinstance(indate, dt.datetime):
        return indate.date()
    if isinstance(indate, dt.date):
        return indate
    if isinstance(indate, str):
        stripped = indate.strip()
        # Pick the strptime pattern from the string length.
        formats = {8: "%Y%m%d", 7: "%Y%j"}
        fmt = formats.get(len(stripped))
        if fmt is not None:
            return dt.datetime.strptime(stripped, fmt).date()
    raise KeyError("Input value not recognized as date: %s" % indate)
def tokenize_char(sent):
    """Return the lower-cased character tokens of *sent*, punctuation included."""
    return [ch for ch in sent.lower()]
import getpass
def get_user_name() -> str:
    """Return the login name of the current user.

    Returns
    -------
    username : str
        Username reported by :func:`getpass.getuser`.
    """
    return getpass.getuser()
def quantile(values, p):
    """Return the value at fraction *p* of the sorted data.

    >>> quantile([2, 4, 6, 8], 0.25)
    4
    >>> quantile([3, 2, 6, 4, 8, 5, 7, 1, 9, 11, 10], 0.5)
    6
    """
    ordered = sorted(values)
    position = int(p * len(ordered))
    return ordered[position]
def InStr(*args):
    """Locate one string inside another (1-based, VB-style).

    Call as ``InStr(text, subtext)`` or ``InStr(start, text, subtext)``;
    returns 0 when *subtext* is not found.
    """
    if len(args) == 2:
        text, subtext = args
        return text.find(subtext) + 1
    start, text, subtext = args
    found = text[start - 1:].find(subtext)
    return 0 if found == -1 else found + start
def create_url(controller_ip, endpoint):
    """Build the controller endpoint URL to POST/PUT/GET/DELETE against."""
    return 'https://{}:1080/{}'.format(controller_ip, endpoint)
def parse_psipred_file(psipred_output):
    """Parse a PSIPRED output file and return the predicted secondary structure.

    Each data line is expected to carry the predicted state in its third
    whitespace-separated column; blank lines and '#' comment/header lines
    are skipped.

    :param psipred_output: path to the PSIPRED output file
    :return: concatenated secondary-structure string
    """
    seq = ""
    with open(psipred_output) as handle:  # close the file deterministically
        for line in handle:
            stripped = line.strip()
            # Skip blank and comment/header lines instead of crashing on them.
            if not stripped or stripped.startswith("#"):
                continue
            # split() collapses runs of whitespace; the columns are aligned
            # with multiple spaces, which split(" ") would mis-handle.
            seq += stripped.split()[2]
    return seq
def custom(x, w, lambdafunc=None):
    """Custom model (for example, for real data fitting).

    Parameters
    ----------
    x : numpy array (N,), dtype=float
        Grid points at which the model is evaluated.
    w : numpy array (N,), dtype=float
        Gaussian-quadrature weights used to normalise the density.
    lambdafunc : callable or None
        Model function; leave None when the generating model is unknown,
        in which case x and w are ignored.

    Returns
    -------
    peq : numpy array (N,) or None
        Probability density normalised so that sum(w * peq) == 1,
        or None when no model function was supplied.
    """
    if lambdafunc is None:
        return None
    density = lambdafunc(x)
    density /= sum(w * density)
    return density
def pep8_filter(line):
    """Standard filter for pep8: drop findings from the bundled bottle.py."""
    return 'argweaver/bottle.py' not in line
import re
def sanitize(s):
    """Make *s* usable as a macro name.

    Every character that is not alphanumeric (non-word) is replaced
    with an underscore.
    """
    return re.sub(r"\W", "_", s)
def crop_to(image_to_crop, reference_image):
    """Center-crop *image_to_crop* to the size of *reference_image*.

    Assumes the relevant content sits in the middle, so equal margins are
    removed on the left/right and on the top/bottom.

    :param image_to_crop: PIL-style image to crop
    :param reference_image: image whose size defines the crop
    :return: the cropped image
    """
    ref_w, ref_h = reference_image.size
    cur_w, cur_h = image_to_crop.size
    margin_x = (cur_w - ref_w) / 2
    margin_y = (cur_h - ref_h) / 2
    box = (int(margin_x),
           int(margin_y),
           int(margin_x + ref_w),
           int(margin_y + ref_h))
    return image_to_crop.crop(box=box)
def _isbn_has_valid_checksum(identifier):
    """Determine whether the given ISBN has a valid checksum.

    Supports both ISBN-10 (weighted mod-11 sum, trailing 'X' meaning 10)
    and ISBN-13 (alternating 1/3 weights, mod 10).

    :param identifier: ISBN string of 10 or 13 characters (no separators)
    :return: True when the checksum is consistent
    """
    if len(identifier) == 10:
        # Bug fix: an ISBN-10 uses its own mod-11 scheme; merely prefixing
        # '978' does NOT produce a valid ISBN-13 (the check digit differs),
        # so valid ISBN-10s were previously rejected.
        checksum = 0
        for i, char in enumerate(identifier):
            value = 10 if char in 'xX' else int(char)
            checksum += (10 - i) * value
        return (checksum % 11) == 0
    numerals = [int(char) for char in identifier]
    checksum = 0
    for i, numeral in enumerate(numerals):
        weight = 1 if i % 2 == 0 else 3
        checksum += weight * numeral
    return (checksum % 10) == 0
def _find_physio(subject, session, bids_path):
    """Locate the physiology recording for one subject/session in a BIDS tree.

    :param subject: subject label (without the ``sub-`` prefix)
    :param session: session label (without the ``ses-`` prefix)
    :param bids_path: ``pathlib.Path`` root of the BIDS dataset
    :return: path of the single matching ``*_physio.tsv.gz`` file
    :raises ValueError: when no unique match exists
    """
    pattern = f"**/sub-{subject}_ses-{session}*_physio.tsv.gz"
    matches = list(bids_path.glob(pattern))
    if len(matches) != 1:
        raise ValueError("No associated physiology file")
    return matches[0]
def _is_linux_os(rctx):
    """Report whether the repository-rule host operating system is Linux."""
    os_name = rctx.os.name.lower()
    return os_name.startswith("linux")
import collections
def parse_file(path):
    """
    Parse a spikes file: fail hard! If parsing does not work,
    print the offending line and exit(1).

    Each line must contain ``gid time``. Returns a dict keyed on gid (int);
    each value is a list of (line number, spike time, stripped line) tuples.
    """
    parsed_data = collections.defaultdict(list)
    # Context manager guarantees the handle is closed on every code path
    # (the original leaked it); enumerate replaces the manual counter.
    with open(path, "r") as fp:
        for line_idx, line in enumerate(fp):
            stripped_line = line.strip()
            split_items = stripped_line.split()
            try:
                gid = int(split_items[0].strip())
                time = float(split_items[1].strip())
            except (ValueError, IndexError):
                # Narrowed from a bare except; deliberate hard failure.
                print("Could not parse a line in the file!!!! \n")
                print(" line: " , line_idx, ": ", stripped_line)
                print(path)
                exit(1)  # failure
            parsed_data[gid].append((line_idx, time, stripped_line))
    return parsed_data
def _get_full_customization_args(customization_args, ca_specs):
    """Populate *customization_args* with defaults for any missing spec.

    Mutates and returns the given dict; every expected-but-absent entry
    becomes ``{'value': spec.default_value}``.
    """
    missing = (spec for spec in ca_specs if spec.name not in customization_args)
    for spec in missing:
        customization_args[spec.name] = {'value': spec.default_value}
    return customization_args
import math
def eq141d10(l, sx, a, sum_st, iy, fy, e):
    """Compression in extreme fibers of box type flexural members.

    AREMA 2018 Section 1.4.1 Table 15-1-11 Row 10.

    Compression in the extreme fibers of box type welded or bolted flexural
    members symmetrical about the principal axis midway between the webs:

        (l/r)e = math.sqrt(1.105*math.pi/sxx*sqrt(sum(s/t))/
                 a*math.sqrt(i_yy/(1+mu)))
        fa_bc = 0.55*fy - 0.55*math.pow(fy,2)/(6.3*math.pow(math.pi,2)*e)*
                math.pow((l/r)e,2)

    Args:
        l (float): distance between points of lateral support for the
            compression flange, unbraced length [inches]
        sx (float): section modulus of the box type member about its
            major axis [inches^3]
        a (float): total area enclosed within the center lines of the box
            type member webs and flanges [inches^2]
        sum_st (float): sum of the ratio width-to-thickness of each flange and
            ratio of the depth to thickness of each web (neglect
            any portion of the flange which projects beyond the
            box section)
        iy (float): second moment of area of the box type member about its
            minor axis, [inches^4]
        fy (float): yield strength of the material (units per Note 1)
        e (float): modulus of elasticity of the material (units per Note 1)

    Returns:
        tuple (fa_bc, text):
            fa_bc (float): allowable compression stress in extreme fibers of
                box type flexure members
            text (str): human-readable calculation trace (reference, inputs
                and intermediate values)

    Notes:
        1. Units in lbs and inches.
        2. Poisson's ratio, mu, is taken as 0.3.
    """
    ref_text = "AREMA 2018 Section 1.4.1 Table 15-1-11 Row 10 \n\n"
    user_input = (f'l = {l:.2f}, Sx = {sx:.2f}, a = {a:.2f}, ' +
                  f'sum_st = {sum_st:.2f}, Iy = {iy:.2f}, Fy = {fy:.1f}, ' +
                  f'E = {e:.1f} \n\n')
    mu = 0.3
    # Equivalent slenderness ratio (l/r)e of the box section.
    lre = math.sqrt((1.105*math.pi/sx*math.sqrt(sum_st))/
                    (a*math.sqrt(iy/(1+mu))))
    # Allowable compression stress from the parabolic column-type formula.
    fa_bc = (0.55*fy-0.55*math.pow(fy,2)/
             (6.3*math.pow(math.pi,2)*e)*math.pow(lre,2))
    text1 = (f'(l/r)e = math.sqrt((1.105*math.pi/sx*math.sqrt(sum_st))/' +
             f'(a*math.sqrt(iy/(1+mu)))) \n' +
             f'(l/r)e = math.sqrt((1.105*math.pi/{sx:.2f}*math.sqrt({sum_st:.2f}))/' +
             f'({a:.2f}*math.sqrt({iy:.2f}/(1+{mu:.2f})))) \n' +
             f'(l/r)e = {lre:.2f} \n')
    text2 = (f'fa_bc = (0.55*fy-0.55*math.pow(fy,2)/' +
             f'(6.3*math.pow(math.pi,2)*e)*math.pow(lre,2)) \n' +
             f'fa_bc = (0.55*{fy:.1f}-0.55*math.pow({fy:.1f},2)/' +
             f'(6.3*math.pow(math.pi,2)*{e:.1f})*math.pow({lre:.2f},2)) \n' +
             f'fa_bc = {fa_bc:.1f}')
    text = ref_text + user_input + text1 + text2
    return fa_bc, text
def alphanumeric(password: str) -> bool:
    """
    Check that *password* is strictly alphanumeric:
      1. at least one character ("" is not valid)
      2. only uppercase/lowercase latin letters and digits 0 to 9
      3. no whitespaces / underscore (or any other symbol)

    :param password: candidate string
    :return: True when every character is an ASCII letter or digit
    """
    # Bug fix: str.isalpha()/isdigit() accept non-latin letters and unicode
    # digits, contradicting condition 2; restricting to ASCII enforces it.
    # "".isalnum() is False, which also covers condition 1.
    return password.isalnum() and password.isascii()
def nested_get(dct, keys):
    """Follow *keys* through nested dicts/lists and return the final value.

    List levels interpret the key as an integer index, e.g.
    ``nested_get({"a": [{"b": 42}]}, ["a", "0", "b"])`` returns ``42``.
    """
    current = dct
    for key in keys:
        current = current[int(key)] if isinstance(current, list) else current[key]
    return current
def pos_neg(a: int, b: int, negative: bool) -> bool:
    """Sign relation between two integers.

    When *negative* is True: report whether both a and b are negative.
    Otherwise: report whether a and b carry strictly opposite, non-zero
    signs. Returns False in every other case.
    """
    if negative:
        return a < 0 and b < 0
    # Opposite signs, with zero excluded on both sides.
    return (a < 0) != (b < 0) and a != 0 and b != 0
def args_cleanup(args, s):
    """
    Remove substring *s* from every key of *args*.

    Arguments
    ---------
    args: The dictionary to be parsed
    s   : Substring to be discarded. e.g. s = '--', then "--record" --> "record"

    Returns the same dict object with rewritten keys.
    Raises ValueError when the inputs have the wrong types.
    """
    if not isinstance(args, dict) or not isinstance(s, str):
        # Bug fix: report the type of the argument s, not of the builtin str.
        raise ValueError("Wrong input type. args should be type dict and s should be type str. {0:} and {1:} are rather given".format(
            type(args), type(s)))
    for old_key in list(args):
        new_key = old_key.replace(s, '')
        args[new_key] = args.pop(old_key)
    return args
import importlib
def import_module(dotted_path):
    """Import and return the module at *dotted_path*.

    Raises ImportError when the import fails.
    """
    return importlib.import_module(dotted_path)
def make_kms_map(map_string):
    """Convert a comma-separated string of ``key:value`` pairs into a dict."""
    entries = (chunk.strip() for chunk in map_string.split(","))
    pairs = (entry.split(":") for entry in entries)
    return {key: value for key, value in pairs}
def allowed_transitions(states):
    """
    Compute the transitions allowed in an acyclic progression model:
    individuals may only move towards states further right in the list.

    Parameters
    ----------
    states : list
        the ordered set of states in the model

    Returns
    ----------
    list
        every possible transition as a two-element [from_index, to_index] list
    """
    n = len(states)
    return [[i, j] for i in range(n) for j in range(i + 1, n)]
import requests
def get_series_name(cfg, series_id):
    """
    Request series information from Opencast.

    :param series_id: Unique identifier for series
    :param cfg: Opencast configuration; must provide 'uri', 'user' and
        'password' keys
    :return: Title of the series (the 'title' field of the JSON response)
    """
    url = cfg['uri'] + "/api/series/" + series_id
    # NOTE(review): no r.raise_for_status() — an HTTP error would surface as
    # a JSON decode error or KeyError instead; confirm this is intended.
    r = requests.get(url=url, auth=(cfg['user'], cfg['password']))
    x = r.json()
    return x['title']
import contextlib
import wave
import audioop
def read_wave(path):
    """Reads a .wav file and resamples it to 32 kHz.

    Takes the path, and returns (PCM audio data, sample rate).
    The input must be mono (1 channel), 16-bit PCM — asserted below.

    NOTE(review): the audioop module is deprecated and removed in
    Python 3.13; a migration is needed before upgrading.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        n_frames = wf.getnframes()
        data = wf.readframes(n_frames)
        # ratecv resamples from the file's native rate to 32 kHz; the final
        # None is the initial converter state.
        converted = audioop.ratecv(data, sample_width, num_channels, sample_rate, 32000, None)
        # converted is (fragment, new_state); only the fragment is returned.
        return converted[0], 32000
def calc_simple_profit(orders, kl_pd):
    """
    Compute the total trading profit; "simple" means commissions are ignored.

    :param orders: sequence of AbuOrder objects
    :param kl_pd: financial time series, a pd.DataFrame with a 'close' column
    :return: accumulated profit (aligned to the last row of kl_pd)
    """
    latest_close = kl_pd[-1:].close
    total = 0
    for order in orders:
        # Open positions ('keep') are marked to the latest close price;
        # closed positions use their recorded sell price.
        exit_price = latest_close if order.sell_type == 'keep' else order.sell_price
        total += (exit_price - order.buy_price) * order.buy_cnt * order.expect_direction
    return total
def decimal_to_octal(decimal: int) -> str:
    """Convert a decimal integer to an octal string like ``0o17``.

    Raises TypeError when the input is not an int.
    """
    if not isinstance(decimal, int):
        raise TypeError("You must enter integer value")
    sign = '-' if decimal < 0 else ''
    remaining = abs(decimal)
    digits_value, place = 0, 0
    # Build the octal digits as a base-10 number, least significant first.
    while remaining > 0:
        digits_value += (remaining % 8) * pow(10, place)
        place += 1
        remaining //= 8
    return f'{sign}0o{digits_value}'
def rfam_problems(status):
    """
    List the names of all truthy Rfam problem flags in *status*, skipping
    bookkeeping keys; returns ["none"] when nothing is flagged.
    """
    ignore = {"has_issues", "messages", "has_issue", "id"}
    flagged = [name for name, value in status.items()
               if value and name not in ignore]
    return sorted(flagged) or ["none"]
import logging
def create_dict(timespan, extremes, numbeats, mean_hr, beat_times):
    """Assemble the metrics dictionary with key ECG information.

    Args:
        timespan (float): time duration of the ECG strip
        extremes (float tuple): (min, max) of lead voltages in the file
        numbeats (int): number of detected beats in the file
        mean_hr (int): average heart rate over the file length
        beat_times (list of floats): times when each beat occurred

    Returns:
        dict: keys duration, voltage_extremes, num_beats, mean_hr_bpm, beats
    """
    logging.info("Assigning dictionary entries")
    return {
        "duration": timespan,
        "voltage_extremes": extremes,
        "num_beats": numbeats,
        "mean_hr_bpm": mean_hr,
        "beats": beat_times,
    }
def preserve_sesam_special_fields(target, original):
    """
    Copy Sesam's special and reserved fields from *original* onto *target*.
    ref https://docs.sesam.io/entitymodel.html#reserved-fields
    """
    sys_attribs = ["_deleted","_hash","_id","_previous","_ts","_updated","_filtered", "$ids", "$children", "$replaced"]
    present = (attr for attr in sys_attribs if attr in original)
    for attr in present:
        target[attr] = original[attr]
    return target
def unique_edge_list(network):
    """
    Build an edge list containing each undirected edge exactly once,
    dropping the duplicates produced by bidirectional adjacency.

    :param network_simulator.Network network: source network
    :return: list of unique (node_id, adjacent_id) tuples
    """
    edge_list = []
    for node_key in network.nodes():
        node = network.network_dict[node_key]
        for adjacent in node.get_adjacents():
            forward = (node.node_id, adjacent)
            backward = (adjacent, node.node_id)
            if forward not in edge_list and backward not in edge_list:
                edge_list.append(forward)
    return edge_list
def reverse_DNA(DNA_string):
    """
    Return the reverse-complement of a DNA string.

    PARAMETERS
    ----------
    DNA_string : string
        DNA sequence of the FASTA/Q file (characters A, T, G, C, N)

    RETURNS
    -------
    The reverse-complement of the DNA_string.
    """
    complement = {"A": "T", "T": "A", "G": "C", "C": "G", "N": "N"}
    return "".join(complement[base] for base in reversed(DNA_string))
import math
def calc_rb_max(n_pe, beam_tot_z, beam_num_ptcl):
    """
    Maximum radius of the plasma bubble.

    Valid in the "strong bubble regime", where rb_max*k_pe >> 1;
    from Eq. (12) of LBN2017.

    Args:
        n_pe: number density of the electron plasma
        beam_tot_z: total length of the drive beam
        beam_num_ptcl: number of e- in the drive beam

    Returns:
        rb_max: maximum radius of the plasma bubble
    """
    density_term = (beam_num_ptcl / math.pi / n_pe) ** (3 / 8.)
    return 2 ** (7 / 8.) * density_term / beam_tot_z ** (1 / 8.)
def get_window_start_times(profileDict):
    """
    Start times of the sampling windows used in the profiled run.

    Args:
        profileDict (dict): Dictionary of values representing an Arm MAP
            profiled run

    Returns:
        List of start times of sampling windows
    """
    assert isinstance(profileDict, dict) and "samples" in profileDict
    samples = profileDict["samples"]
    return samples["window_start_offsets"]
def extreme_points_2(contour):
    """Return the left-, right-, top- and bottom-most points of *contour*.

    *contour* is an OpenCV-style array of shape (N, 1, 2) holding (x, y).
    """
    xs = contour[:, :, 0]
    ys = contour[:, :, 1]
    left = tuple(contour[xs.argmin()][0])
    right = tuple(contour[xs.argmax()][0])
    top = tuple(contour[ys.argmin()][0])
    bottom = tuple(contour[ys.argmax()][0])
    return left, right, top, bottom
def connect(endpoint=None):
    """Generate a connect packet.

    `endpoint`
        Optional endpoint name
    """
    suffix = endpoint if endpoint else ''
    return u'1::%s' % suffix
def use_filter(filter, url, input):
    """Apply a filter function to input from an URL.

    A filter returning None signals that the input needs no filtering,
    in which case the original input is passed through unchanged.
    """
    output = filter(url, input)
    return input if output is None else output
import sqlite3
def db_retrieve_all(cursor: sqlite3.Cursor, table: str) -> list:
    """Get all values of a table.

    Note: *table* is interpolated into the SQL text (identifiers cannot be
    bound as parameters), so it must come from trusted code.
    """
    result = cursor.execute(f"SELECT * FROM {table}")
    rows = result.fetchall()
    return rows
import struct
def _compute_dc42_checksum(data):
    """Compute checksum DC42 uses to verify sector and tag data integrity.

    Args:
        data: data to compute a checksum for (consumed two bytes at a time,
            so an even length is expected).

    Returns: a 32-bit checksum as a 4-byte big-endian string.
    """
    def addl_rorl(uint, csum):
        """Add `uint` to `csum`; 32-bit truncate; 32-bit rotate right one bit."""
        csum += uint  # add uint
        csum &= 0xffffffff  # truncate
        rbit = csum & 0x1  # rotate part 1 (save low-order bit)
        csum >>= 1  # rotate part 2 (shift right)
        csum += rbit << 31  # rotate part 3 (prepend old low-order bit)
        return csum
    # Loop over all two-byte words in the data and include them in the checksum.
    checksum = 0
    for word_bytes in [data[i:i+2] for i in range(0, len(data), 2)]:
        word = struct.unpack('>H', word_bytes)[0]  # big endian word bytes to native
        checksum = addl_rorl(word, checksum)  # add to checksum
    # return result as a big-endian 32-bit word.
    return struct.pack('>I', checksum)
import re
def regex_overlap(text, regex):
    """For a list of tokens in *text* and a regex, mark which tokens overlap
    a match.

    The tokens are joined with single spaces, the regex is matched in the
    joined string, and a boolean per token says whether any part of that
    token falls inside a match span.
    """
    flags = [False] * len(text)
    joined = ' '.join(text)
    for match in re.finditer(regex, joined):
        begin, end = match.span()
        offset = 0
        for idx, token in enumerate(text):
            # Token start lies in the span, or its last character reaches it.
            if begin <= offset < end or begin <= offset + len(token) < end:
                flags[idx] = True
            offset += len(token) + 1  # +1 for the joining space
    return flags
def get_product_img_href(product_page_object) -> str:
    """
    Get product image href link from product page-object.

    :param product_page_object: BeautifulSoup4 page object
    :return: string representation of product image href (the lazy-loaded
        "data-original" attribute; None when the attribute is absent)
    """
    # The thumbnail <figure class="offer-thumb__image"> wraps an
    # <a class="lazy"> whose real image URL lives in "data-original".
    anchor_element = product_page_object.find("figure", {"class": "offer-thumb__image"}).find("a", {"class": "lazy"})
    return anchor_element.get("data-original")
def __number_measurements(a, func_axis=None):
    """Calculate the number of measurements of an array.

    :param a: numpy array of measurements
    :param func_axis: axis holding the function values; when given, the
        count excludes that axis (total size divided by its length)
    :return: number of measurements (int when func_axis is None, otherwise
        the float quotient, kept for backward compatibility)
    """
    # Idiom fix: compare with None by identity, not equality.
    if func_axis is None:
        return a.size
    return a.size / a.shape[func_axis]
def crop_image_to_bbox(img_arr, X_meta):
    """Crop *img_arr* to just the bounding box described by *X_meta*.

    X_meta must provide 'x', 'y', 'width' and 'height' entries.
    """
    left = int(X_meta['x'])
    top = int(X_meta['y'])
    right = int(X_meta['x'] + X_meta['width'])
    bottom = int(X_meta['y'] + X_meta['height'])
    return img_arr[top:bottom, left:right]
def grid_case_group(self, group_id):
    """Get a particular grid case group belonging to a project.

    Arguments:
        group_id(int): group id

    Returns:
        :class:`rips.generated.generated_classes.GridCaseGroup`,
        or None when no group carries that id.
    """
    for candidate in self.grid_case_groups():
        if candidate.group_id == group_id:
            return candidate
    return None
import re
def lineStartsWithDate(line):
    """
    Check whether *line* starts with an ISO-style date (YYYY-MM-DD).

    Bug fix: the previous implementation ran re.search twice (the first
    result was unused) and matched a date ANYWHERE in the line, despite
    the function's name and docstring; re.match anchors at the start.
    """
    return re.match(r"\d\d\d\d\-\d\d\-\d\d", line) is not None
def version_tuple(version):
    """
    Normalise a version given as string or tuple into a tuple,
    in a (major, minor, release) kind of format.

    Anything else yields None (implicitly), matching historic behavior.
    """
    if isinstance(version, tuple):
        return version
    if isinstance(version, str):
        parts = version.split('.')
        return tuple(int(part) for part in parts)
def _dso2info(dso):
    """Return mangled name of DSO info module.

    eg. 'my.pkg.libs.adso' -> 'my.pkg.libs.adso_dsoinfo'
    """
    prefix, sep, last = dso.rpartition('.')
    return '{}{}{}_dsoinfo'.format(prefix, sep, last)
def accuflux(idxs_ds, seq, data, nodata):
    """Returns maps of accumulate upstream <data>

    Parameters
    ----------
    idxs_ds : 1D-array of intp
        index of next downstream cell
    seq : 1D array of int
        ordered cell indices from down- to upstream
    data : 1D array
        local values to be accumulated
    nodata : float, integer
        nodata value

    Returns
    -------
    1D array of data.dtype
        accumulated upstream data
    """
    # intialize output with correct dtype
    accu = data.copy()
    # Visiting cells from up- to downstream guarantees a cell's accumulated
    # value is final before being added to its downstream neighbour.
    for idx0 in seq[::-1]:  # up- to downstream
        idx_ds = idxs_ds[idx0]
        # Skip pits/outlets (a cell draining to itself) and nodata cells.
        if idx0 != idx_ds and accu[idx_ds] != nodata and accu[idx0] != nodata:
            accu[idx_ds] += accu[idx0]
    return accu
def topic_exists(client, topic_name):
    """
    Report whether the topic is already created on the broker.

    Args:
        client (Kafka Client): The Kafka admin client
        topic_name (str): the topic name to be checked
    """
    metadata = client.list_topics(timeout=2)
    existing = {entry.topic for entry in metadata.topics.values()}
    return topic_name in existing
def _from_json(doc):
    """Invert _to_json(): decode $numberLong ints and restore non-string keys."""
    if "$numberLong" in doc:
        return int(doc["$numberLong"])
    if "__nonstring_keys" in doc:
        # Map each serialized string key back to its original (popped so the
        # marker itself does not appear in the result).
        key_map = doc.pop("__nonstring_keys")
        return {key_map[key]: value for key, value in doc.items()}
    return doc
def _change_memo(amount, coins, T, n):
    """Helper function for num_coin_changes_memo().

    Counts ways to make *amount* from the first *n* coins, memoised in T.
    NOTE(review): a memoised value of 0 is falsy and gets recomputed;
    harmless for correctness but wastes work — confirm T's fill value.
    """
    # Base cases.
    if amount < 0:
        return 0
    if amount == 0:
        return 1
    if n <= 0 and amount > 0:
        return 0
    # Apply memoization (truthiness check kept to preserve behavior).
    if T[n][amount]:
        return T[n][amount]
    # Ways that include coin n plus ways that exclude it.
    with_coin = _change_memo(amount - coins[n - 1], coins, T, n)
    without_coin = _change_memo(amount, coins, T, n - 1)
    T[n][amount] = with_coin + without_coin
    return T[n][amount]
import re
def clean_str(text):
    """
    Apply some standard text cleaning with regular expressions.

    1. Remove unicode (non-ASCII) characters.
    2. Combine multiline hyphenated words.
    3. Remove newlines and extra spaces.

    For example, ``'I am a \\nbad\\r\\n\\tstr-\\ning.'`` becomes
    ``'I am a bad string.'``.

    Parameters
    ----------
    text : str or bytes
        Text to clean; bytes input is decoded as UTF-8 first.

    Returns
    ----------
    text : str
        Cleaned text.
    """
    # Py2->Py3 fix: str has no .decode() in Python 3, so only decode bytes.
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    # Remove unicode characters.
    text = re.sub(r'[^\x00-\x7F]+', ' ', text)
    # Combine multiline hyphenated words.
    text = re.sub('-[\s]*[\r\n\t]+', '', text, flags=re.MULTILINE)
    # Remove newlines and extra spaces.
    text = re.sub('[\r\n\t]+', ' ', text, flags=re.MULTILINE)
    text = re.sub('[\s]+', ' ', text, flags=re.MULTILINE)
    return text
def additionner_deux_nombres(premier_nombre, second_nombre):
    """
    Add two numbers.

    inputs:
        two numbers
    outputs:
        one number, their sum
    """
    return premier_nombre + second_nombre
def removeOutlier(df_in, col_name):
    """Drop every row of *df_in* whose *col_name* value is an IQR outlier.

    Keeps rows strictly inside (Q1 - 1.5*IQR, Q3 + 1.5*IQR).

    Input:
        - df_in: pandas DataFrame the outliers are removed from.
        - col_name: name of the column the IQR is calculated on.
    """
    series = df_in[col_name]
    q1, q3 = series.quantile(0.25), series.quantile(0.75)
    spread = 1.5 * (q3 - q1)  # 1.5 * interquartile range
    inside = (series > q1 - spread) & (series < q3 + spread)
    return df_in.loc[inside]
def static_params_to_dygraph(model, static_tensor_dict):
    """Simple tool for convert static paramters to dygraph paramters dict.

    **NOTE** The model must both support static graph and dygraph mode.

    Args:
        model (nn.Layer): the model of a neural network.
        static_tensor_dict (dict): static-mode parameter tensors keyed by
            their static parameter names, as returned by
            `paddle.static.load_program_state` (the code below indexes it
            by ``p.name``, so it is a dict, not a path).

    Returns:
        [tensor dict]: a state dict the same as the dygraph mode.
    """
    state_dict = model.state_dict()
    # static_tensor_dict = paddle.static.load_program_state(static_params_path)
    ret_dict = dict()
    for n, p in state_dict.items():
        # Look up each dygraph parameter's tensor by its static-graph name.
        ret_dict[n] = static_tensor_dict[p.name]
    return ret_dict
def accept_peaks_size_width(time, data, peak_inx, index, min_inx, threshold, pfac=0.75):
    """
    Accept each detected peak and compute its size and width.

    Args:
        time (array): time, must not be None
        data (array): the data with the peaks
        peak_inx: index of the current peak
        index: current index (first point past the peak region)
        min_inx: index of the previous trough
        threshold: threshold value (accepted for interface compatibility;
            unused in this function)
        pfac: fraction of peak height where its width is measured

    Returns:
        list: [time of peak, height (data at the peak),
               size (peak minus previous trough), width at pfac*size, 0.0]
        None: second element kept for the caller's accept-function protocol
    """
    size = data[peak_inx] - data[min_inx]
    # Level at which the width is measured: pfac of the way up from the trough.
    wthresh = data[min_inx] + pfac * size
    width = 0.0
    # Walk left from the peak until the data first drops below the level.
    for k in range(peak_inx, min_inx, -1):
        if data[k] < wthresh:
            width = time[peak_inx] - time[k]
            break
    # Walk right from the peak and add the right-hand half of the width.
    for k in range(peak_inx, index):
        if data[k] < wthresh:
            width += time[k] - time[peak_inx]
            break
    return [time[peak_inx], data[peak_inx], size, width, 0.0], None
import base64
def extract_salt(salted_ciphertext: bytes):
    """
    Extract salt and ciphertext from salted ciphertext and return them
    as a (salt, ciphertext) tuple.

    The salt is stored urlsafe-base64-encoded before the first b':'.
    """
    # Bug fix: maxsplit=1, so ciphertext containing b':' bytes no longer
    # raises a ValueError from over-unpacking.
    encoded_salt, ciphertext = salted_ciphertext.split(b':', 1)
    salt = base64.urlsafe_b64decode(encoded_salt)
    return salt, ciphertext
def get_optimizer_variables(optimizer):
    """Return the variables of the given `tf.compat.v1.train.Optimizer`.

    Equivalent to `optimizer.variables()`.

    Args:
        optimizer: An instance of `tf.compat.v1.train.Optimizer` which has
            created variables (typically after a call to `Optimizer.minimize`).

    Returns:
        A list of variables which have been created by the `Optimizer`.
    """
    variables = optimizer.variables()
    return variables
import datetime
def datetime_formatter(key, time_format='%Y/%m/%d %H:%M:%S'):
    """Create a datetime formatting function.

    The returned callable accepts keyword arguments, reads the timestamp
    stored under *key*, and renders it as ``"<key>: <formatted time>"``.

    Parameters
    ----------
    key
        The dictionary key to format; its value should be a timestamp.
    time_format
        A format string suitable for strftime().

    Returns
    -------
    format
        A function taking keyword arguments and returning a string.
    """
    def format(**kwargs):
        stamp = datetime.datetime.fromtimestamp(kwargs[key])
        rendered = stamp.strftime(time_format)
        return key + ': ' + rendered
    return format
def scale_array(arr, s):
    """Return a new list with every element of ``arr`` multiplied by ``s``.

    Parameters:
        1. arr: a numeric array or list
        2. s: scaling factor, real number
    """
    scaled = []
    for value in arr:
        scaled.append(value * s)
    return scaled
def unwrap_model(model):
    """
    Strip distributed-training containers (e.g. DataParallel) from a model.

    Walks down through every ``.module`` attribute until reaching an object
    that has none, and returns that innermost model.

    Args:
        model (`torch.nn.Module`): The model to unwrap.
    """
    # There may be several wrapping levels, so keep descending.
    while hasattr(model, "module"):
        model = model.module
    return model
def get_header(results):
    """Build the header row, using the first value in ``results`` as template.

    Each 'section:metric' pair in the template becomes one column, after a
    leading 'name' column.
    """
    template = next(iter(results.values()))
    columns = ['name']
    for section, metrics in template.items():
        columns.extend('%s:%s' % (section, metric) for metric in metrics)
    return columns
def unpack_dataset_joint_variables(dataset, n_dof):
    """
    Split a dataset whose rows are examples and whose columns hold joint
    positions, velocities and accelerations (in that order).

    Returns the matrices q, qv, qa with rows of examples for joint
    positions, velocities and accelerations respectively.
    """
    positions = dataset[:, :n_dof]
    velocities = dataset[:, n_dof:2 * n_dof]
    accelerations = dataset[:, 2 * n_dof:]
    return positions, velocities, accelerations
def spell_stats(user):
    """
    Return the defence, name and strength of a battle participant.

    Player and boss records store these fields under different keys, so the
    player layout is tried first and a ``KeyError`` falls back to the boss
    layout.

    Returns
    -------
    Any
        float - The defence of the participant.
        str - The participant's name.
        float - The strength of the participant.
    """
    try:
        # Player records nest stats under 'account' and keep a user object.
        defence = user['account']['stats']['defense']
        name = user['user'].name
        strength = user['account']['stats']['strength']
    except KeyError:
        # Boss records keep stats and name at the top level.
        defence = user['stats']['defense']
        name = user['name']
        strength = user['stats']['strength']
    return defence, name, strength
def wordlists(*wl):
    """Count string occurrences across any number of lists.

    Input is an arbitrary number of lists of strings. Output is one
    dictionary where each string is a key and the count of that string
    across all lists is the value.
    """
    from collections import Counter
    from itertools import chain
    # Counter does the tallying in one pass; cast back to a plain dict to
    # keep the original return type.
    return dict(Counter(chain.from_iterable(wl)))
import torch
def softmax(X):
    """
    Entropy-smoothed max, a.k.a. logsumexp, along dim=2.

    Despite the name, this computes log(sum(exp(X))) per (batch, row) —
    the smoothed maximum — not a probability distribution. The maximum is
    subtracted first for numerical stability.

    :param X: torch.Tensor, shape = (b, n, m)
        Vector to project
    :return: torch.Tensor, shape = (b, n)
        Projected vector
    """
    peak = torch.max(X, dim=2).values
    shifted = X - peak.unsqueeze(2)
    return peak + shifted.exp().sum(dim=2).log()
import torch
def matperm2listperm(matperm):
    """Converts permutation matrix to its enumeration (list) form.

    Args:
        matperm: (..., n, n) tensor; each row of the trailing matrix has a
            single non-zero entry.

    Returns:
        listperm: (..., n) - listperm[t, i] is the index of the only
        non-zero entry in matperm[t, i, :]
    """
    # Permutation matrices must be square.
    assert matperm.size(-2) == matperm.size(-1)
    # The argmax of each row is the position of its single non-zero entry.
    # (Dead locals and commented-out alternatives removed.)
    return torch.argmax(matperm, dim=-1, keepdim=False)
def _warp_dir(intuple, nlevels=3):
"""
Extract the ``restrict_deformation`` argument from metadata.
Example
-------
>>> _warp_dir(("epi.nii.gz", {"PhaseEncodingDirection": "i-"}))
[[1, 0, 0], [1, 0, 0], [1, 0, 0]]
>>> _warp_dir(("epi.nii.gz", {"PhaseEncodingDirection": "j-"}), nlevels=2)
[[0, 1, 0], [0, 1, 0]]
"""
pe = intuple[1]["PhaseEncodingDirection"][0]
return nlevels * [[int(pe == ax) for ax in "ijk"]] | a5558dcdb4d7f0893789a2f6ba98acb2fa10f303 | 688,094 |
def update_position(dir, x, y):
    """Return the (x, y) coordinates after one step along the path.

    Args:
        dir: One of 'UP', 'DOWN', 'LEFT' or 'RIGHT'.
        x: Current column.
        y: Current row (grows downwards, so 'DOWN' increments y).

    Returns:
        The updated (x, y) tuple.

    Raises:
        ValueError: If ``dir`` is not a recognised direction (the previous
            version silently fell through and returned None).
    """
    steps = {
        'DOWN': (0, 1),
        'UP': (0, -1),
        'LEFT': (-1, 0),
        'RIGHT': (1, 0),
    }
    try:
        dx, dy = steps[dir]
    except KeyError:
        raise ValueError('Unknown direction: %r' % (dir,))
    return x + dx, y + dy
from typing import Callable
from typing import Any
def lazy_property(function: Callable) -> Any:
    """Cache a property's value after its first computation.

    The result is stored on the instance under a private attribute, so the
    wrapped function runs exactly once, on the first access; every later
    access returns the stored value."""
    cache_name = "_lazy_" + function.__name__

    @property  # type: ignore
    def cached(self):
        try:
            return getattr(self, cache_name)
        except AttributeError:
            value = function(self)
            setattr(self, cache_name, value)
            return value

    return cached
def region_mapping(region):
    """Map the user supplied region to the AMP for Endpoints console host.

    Accepts 'apjc', 'eu' or 'nam' (case-insensitive); any other value
    raises KeyError.
    """
    # The NAM console has no regional subdomain prefix.
    subdomain = {
        "apjc": "apjc.",
        "eu": "eu.",
        "nam": "",
    }[region.lower()]
    return "console." + subdomain + "amp.cisco.com"
import torch
def get_accuracy(outputs, labels):
    """Turn binary-cross-entropy outputs into an accuracy value.

    Predictions are thresholded at 0.5 and compared to the 0/1 labels.
    """
    predictions = (outputs >= 0.5).float()
    error_rate = torch.mean(torch.abs(predictions - labels)).item()
    return 1.0 - error_rate
from typing import Tuple
def get_choosen_building(
    building: Tuple[str, str],
    choice: str
) -> str:
    """
    Return the building the user selected.

    Parameters
    ----------
    building: Tuple[str, str]
        A tuple containing 2 randomly selected buildings.
    choice: str
        The valid choice given by the user; '1' selects the first
        building, anything else the second.

    Returns
    -------
    building: str
        The building selected by the user.
    """
    first, second = building
    if choice == '1':
        return first
    return second
def unique_list(input_, key=lambda x:x):
    """Return the unique elements of ``input_`` (by ``key``), in order."""
    seen_keys = set()
    unique = []
    for element in input_:
        marker = key(element)
        if marker in seen_keys:
            continue
        seen_keys.add(marker)
        unique.append(element)
    return unique
def pos_neg_split(df):
    """
    Split a sentiment-analyzed DataFrame into negative and positive rows,
    for the creation of two separate models for LDAvis.

    INPUT: DataFrame with an 'Analysis' column of sentiment labels
    OUTPUT: (negative DataFrame, positive DataFrame)
    """
    sentiment = df['Analysis']
    negative = df[sentiment == 'Negative']
    positive = df[sentiment == 'Positive']
    return negative, positive
def collapse_complexes(data, conjugate_flag=False):
    """Turn a flat iterable of (real, imaginary) pairs into complex numbers.

    Given [a, b, c, d, e, f] this returns
    [complex(a, b), complex(c, d), complex(e, f)]; with ``conjugate_flag``
    set, each imaginary part is negated. A new list is returned; the
    original is unchanged.
    """
    # Pairing trick: two references to one iterator make zip() consume the
    # sequence two items at a time — fast and memory-efficient (see
    # http://stackoverflow.com/questions/4628290/pairs-from-single-list).
    stream = iter(data)
    sign = -1 if conjugate_flag else 1
    return [complex(real, sign * imag) for real, imag in zip(stream, stream)]
def _code_in_list(code, codelist):
"""Tells if `code` is contained in `codelist`
Examples:
- 401 is not contained in ['3xx', '404', '5xx']
- 404 is contained in ['3xx', '404', '5xx']
- 503 is contained in ['3xx', '404', '5xx']
"""
# status codes to exclude
exact_codes = [code for code in codelist if 'x' not in code]
if str(code) in exact_codes:
return True
# classes of status code to exclude
class_codes = [code[0] for code in codelist if 'x' in code]
if str(code)[0] in class_codes:
return True
return False | 6168f158a1852d67475d63acc4f72d2f089ae1bd | 688,109 |
def reflect_y(x, y, matrix):
    """Mirror the y index across the grid's horizontal midline."""
    flipped_y = matrix.rows - 1 - y
    return x, flipped_y
async def _pinned(db, community):
    """Get a list of pinned post `id`s in `community`.

    Selects ids of pinned, non-deleted posts for the community, newest
    (highest id) first, via a single-column query against `hive_posts`.
    """
    sql = """SELECT id FROM hive_posts
              WHERE is_pinned = '1'
                AND is_deleted = '0'
                AND community = :community
            ORDER BY id DESC"""
    # db.query_col returns the single selected column as a flat list.
    return await db.query_col(sql, community=community)
from typing import List
from typing import Dict
def get_user_inputs(title: str,
                    captions: List[str],
                    retvals_size: int = 1024) -> Dict[str, str]:
    """Show text inputs to user and get values from them.

    Parameters
    ----------
    title : str
        Popup title.
    captions : List[str]
        Names of input fields.
    retvals_size : int, optional
        Maximum number of characters retrieved per field. The user may
        type more, but only the first `retvals_size` are returned.
        (default=1024)

    Returns
    -------
    Dict[str, str]
        Mapping of caption to the user's response.

    Raises
    ------
    RuntimeError
        When the user clicked the Cancel button.
    """
    success, _, _, _, retvals_csv, _ = RPR.GetUserInputs(  # type:ignore
        title, len(captions), ",".join(captions), "", retvals_size)
    if not success:
        raise RuntimeError('User clicked Cancel.')
    responses = retvals_csv.split(",")
    return dict(zip(captions, responses))
def parse_yaml_beam_args(pipeline_args):
    """Convert yaml beam args to the list of args TFX accepts.

    Args:
        pipeline_args: dict specified in the config.yml

    Returns:
        List of strings, one '--key=value' argument per dict entry.
    """
    beam_args = []
    for name, value in pipeline_args.items():
        beam_args.append('--{}={}'.format(name, value))
    return beam_args
def are_you_sure(question: str):
    """
    Ask a Y/N question in a loop until a usable answer is given.

    Adds str(" (Y/N): ") to the end of the provided question and
    re-prompts until the (case-insensitive) reply starts with 'y' or 'n'.

    Returns:
        True for a 'y...' answer, False for an 'n...' answer.
    """
    # input() already returns str, so the old str() wrapper was redundant,
    # and the result/break indirection and dead `else: pass` are gone.
    while True:
        answer = input(question + " (Y/N): ").lower()
        if answer.startswith("y"):
            return True
        if answer.startswith("n"):
            return False
def first_string(group):
    """Return the first truthy value in the group, or '' if there is none."""
    return next((candidate for candidate in group if candidate), '')
def tensor_to_op_name(tensor_name):
    """Strip the trailing ':N' part from a tensor name.

    For example 'dense/kernel:0' (a tensor name) becomes 'dense/kernel',
    the operation that outputs the tensor. A name without ':' is returned
    unchanged.

    Args:
      tensor_name: tensor name.

    Returns:
      Corresponding op name.
    """
    pieces = tensor_name.split(':')
    # Well-formed names have at most one ':' separator.
    assert len(pieces) in (1, 2)
    return pieces[0]
def remove(list_, item):
    """Return a copy of ``list_`` with every occurrence of ``item`` removed."""
    return list(filter(lambda element: element != item, list_))
def _buildSpectrumFromQIisotopes(mz, isotopeDistribution, delta=1.0033550000000009):
"""
Build a mass spectrum from a QI mz value, and isotopic distribution pattern.
:param float mz: m/z of the lightest isotope
:param str isotopeDistribution: Hyphenated list of isotopic abundances ordered by 1Da intervals
:param float delta: Isotopic mass difference, defaults to :sup:`13`\ C (1.0033550000000009)
:returns: Reconstructed mass spectrum as a list of (mass, float) tuples
:rtype: list[(float, float),]
"""
spectrum = list()
count = 0
if '-' in isotopeDistribution:
for isotope in isotopeDistribution.split(' - '):
spectrum.append((mz + (delta * count), float(isotope)))
count += 1
else:
# No isotopes
spectrum.append((mz, 100))
return spectrum | 36295828edd0a9459474445a0d6ce1370e6e4b88 | 688,131 |
def generate_path(start, ref, nonref, stop):
    """
    Enumerate the ref and non-ref paths from source to sink.

    Nodes equal to "0" are placeholders and are dropped from each path.
    """
    def _prune(nodes):
        # Drop "0" placeholder nodes.
        return [node for node in nodes if node != "0"]

    return [_prune([start, ref, stop]), _prune([start, nonref, stop])]
def grid_from_data(da):
    """Given a dataset, extract only the grid information.

    Parameters
    ----------
    da : xarray.DataArray
        DataArray to extract grid information from. Must have "ocw"
        conventions, i.e. 2D lats and lons variables.

    Returns
    -------
    ds : xarray.Dataset
        Dataset containing grid information.
    """
    return da.coords.to_dataset()
import torch
def interpolate_irreg_grid(interpfunc, pos):
    """Evaluate the interpolating function at the walker positions.

    Args:
        interpfunc (callable): function that interpolates the data points
        pos (torch.tensor): walker positions, shape Nbatch x 3*Nelec

    Returns:
        torch.tensor: interpolated values at ``pos``
    """
    n_batch = pos.shape[0]
    n_elec = pos.shape[1] // 3
    # Regroup flat coordinates into (batch, electron, xyz) before sampling.
    samples = pos.reshape(n_batch, n_elec, 3)
    return torch.as_tensor(interpfunc(samples))
from typing import Union
def cast_to(type_hint, value) -> Union[str, float, int]:
    """Cast ``value`` to the type named by ``type_hint``.

    If the conversion fails, float()/int() raise ValueError.

    Args:
        type_hint: Either 'float' or 'int'; any other hint leaves the
            value untouched.
        value: Value to be cast into the desired final type.

    Returns:
        Union[str, float, int]: The value in the corresponding type.
    """
    converters = {'float': float, 'int': int}
    caster = converters.get(type_hint)
    return caster(value) if caster else value
def get_cv(m1: float, m2: float) -> float:
    """Compute the coefficient of variation from the first two raw moments."""
    variance = m2 - m1 ** 2
    return variance ** 0.5 / m1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.