content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def max_multiple(divisor, bound):
    """Return the largest multiple of ``divisor`` that is <= ``bound``.

    :param divisor: positive integer.
    :param bound: positive integer.
    :return: the largest integer N such that N % divisor == 0, N <= bound
        and N >= 0 (0 when bound < divisor).
    """
    # Subtracting the remainder rounds ``bound`` down to the nearest multiple.
    remainder = bound % divisor
    return bound - remainder
import json
def get_count(json_filepath):
    """Read the "count" entry from a JSON file.

    :param json_filepath: path to a JSON file containing an object.
    :return: the value under the "count" key, or None when absent.
    """
    with open(json_filepath) as handle:
        payload = json.load(handle)
    try:
        return payload["count"]
    except KeyError:
        # Missing key is treated as "no count recorded".
        return None
from bs4 import BeautifulSoup
import six
def _convert_toc(wiki_html):
    """Convert Table of Contents from mediawiki to markdown"""
    # MediaWiki renders its ToC as <div id="toc">...</div>; replace each such
    # div with the literal "[TOC]" marker used by markdown ToC extensions.
    soup = BeautifulSoup(wiki_html, 'html.parser')
    for toc_div in soup.findAll('div', id='toc'):
        toc_div.replaceWith('[TOC]')
    # six.text_type serialises the soup back to (unicode) HTML text.
    return six.text_type(soup)
import re
import logging
def check_edge(graph, edge_label):
    """
    Parameters
    ----------
    graph : nx.DiGraph
        A graph.
    edge_label : str
        Edge label, treated as a regular expression.
    Returns
    -------
    int
        Counts how many edges have the property `label` that matches `edge_label`.
    """
    edge_label = re.compile(edge_label)
    # edges.data('label') yields (u, v, label) triples; label is None for
    # edges without a 'label' attribute -- search() would raise then
    # (assumes every edge is labelled; TODO confirm against callers).
    filtered = [triple for triple in graph.edges.data('label') if edge_label.search(triple[2])]
    # Log each matching edge as "<src label>.<edge label> -> <dst label>".
    for v, u, label in filtered:
        logging.info(f"{graph.nodes[v]['label']}.{label} -> {graph.nodes[u]['label']}")
    return len(filtered)
def _str_to_bytes(s):
    """Encode ``s`` as UTF-8 bytes; non-str values pass through unchanged."""
    if not isinstance(s, str):
        return s
    # 'surrogatepass' keeps lone surrogates round-trippable.
    return s.encode('utf-8', 'surrogatepass')
from typing import Any
def _pydantic_dataclass_from_dict(dict: dict, pydantic_dataclass_type) -> Any:
    """
    Constructs a pydantic dataclass from a dict incl. other nested dataclasses.
    This allows simple de-serialization of pydentic dataclasses from json.
    :param dict: Dict containing all attributes and values for the dataclass.
        (NOTE: the parameter name shadows the builtin ``dict``; kept for
        backward compatibility.)
    :param pydantic_dataclass_type: The class of the dataclass that should be constructed (e.g. Document)
    """
    # parse_obj() validates/coerces the raw dict (incl. nested models).
    base_model = pydantic_dataclass_type.__pydantic_model__.parse_obj(dict)
    base_mode_fields = base_model.__fields__
    values = {}
    # Copy the validated values off the intermediate BaseModel so they can be
    # handed to the dataclass constructor by keyword.
    for base_model_field_name, base_model_field in base_mode_fields.items():
        value = getattr(base_model, base_model_field_name)
        values[base_model_field_name] = value
    dataclass_object = pydantic_dataclass_type(**values)
    return dataclass_object
import importlib
def load_module(mod):
    """Import and return the module named ``mod`` (dotted path allowed)."""
    loaded = importlib.import_module(mod)
    # Debug trace retained from the original implementation.
    print(loaded, mod)
    return loaded
def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` via the
    Euclidean algorithm.

    :raises ValueError: if either argument is not positive.
    """
    if a <= 0 or b <= 0:
        raise ValueError('Arguments must be positive integers')
    # Tuple assignment replaces the explicit temporary of the original.
    while b:
        a, b = b, a % b
    return a
def find_server(dbinstance, params):
    """
    Find an existing service binding matching the given parameters.

    Returns the first server whose set of truthy lookup attributes equals
    ``params`` exactly, or None when no server matches.
    """
    wanted = params.copy()
    for server in dbinstance.servers:
        # Populating the candidate dict must be in sync with what
        # lookup_target() returns; falsy attributes are omitted.
        candidate = {}
        for attr in ("host", "cluster", "service_address",
                     "address_assignment", "alias"):
            value = getattr(server, attr, None)
            if value:
                candidate[attr] = value
        if candidate == wanted:
            return server
    return None
def __is_global(lon, lat):
    """
    Heuristic check whether the coordinates belong to a global dataset.
    Parameters
    ----------
    lon : np.ndarray or xarray.DataArray
    lat : np.ndarray or xarray.DataArray
    Returns
    -------
    bool
        True when longitudes span > 350 deg and latitudes span > 170 deg.
    """
    lon_span = lon.max() - lon.min()
    lat_span = lat.max() - lat.min()
    return bool(lon_span > 350 and lat_span > 170)
def correctPR(text):
    """
    Remove every space character from the given PR value.
    :param text: raw PR string
    :return: PR string with all spaces stripped out
    """
    return "".join(text.split(" "))
def cubo_oct_coord_test(x, y, z):  # dist2 = 2
    """True when exactly two of x, y, z are odd (octahedron/cuboctahedron grid)."""
    odd_count = (x % 2) + (y % 2) + (z % 2)
    return odd_count == 2
def get_colnames(main_colnames=None, error_colnames=None, corr_colnames=None,
                 cartesian=True):
    """
    Utility function for generating standard column names.

    Any of the three name lists may be supplied explicitly; missing ones are
    derived from ``main_colnames`` (cartesian XYZUVW or astrometric names).
    """
    if main_colnames is None:
        if cartesian:
            main_colnames = list('XYZUVW')
        else:
            # Astrometric column names.
            main_colnames = [
                'ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity',
            ]
    if error_colnames is None:
        error_colnames = ['{}_error'.format(name) for name in main_colnames]
    if corr_colnames is None:
        # All unordered pairs, in main-column order.
        corr_colnames = ['{}_{}_corr'.format(first, second)
                         for i, first in enumerate(main_colnames)
                         for second in main_colnames[i + 1:]]
    return main_colnames, error_colnames, corr_colnames
def jpeg_header_length(byte_array):
    """Return the index just past the first SOS marker (0xFF 0xDA) in the
    given JPEG bytes; falls back to 417 when no marker is found."""
    for idx in range(len(byte_array) - 3):
        if byte_array[idx] == 0xFF and byte_array[idx + 1] == 0xDA:
            return idx + 2
    # Historical default header length used when the marker is absent.
    return 417
def get_cluster_id_by_name(dataproc, project_id, region, cluster_name):
    """Helper function to retrieve the ID and output bucket of a cluster by
    name.

    Returns a (cluster_uuid, config_bucket) tuple for the first matching
    cluster; implicitly returns None when no cluster matches (callers
    should handle that case).
    """
    # dataproc is presumably a Cloud Dataproc client -- TODO confirm.
    for cluster in dataproc.list_clusters(project_id, region):
        if cluster.cluster_name == cluster_name:
            return cluster.cluster_uuid, cluster.config.config_bucket
def diagpq(p, q=0):
    """
    Return a string form of the diagonal metric tensor for signature (p, q):
    ``p`` rows with +1 on the diagonal followed by ``q`` rows with -1,
    rows comma-separated and entries space-separated.
    """
    n = p + q
    rows = []
    for i in range(n):
        entries = ['0'] * n
        entries[i] = '1' if i < p else '-1'
        rows.append(' '.join(entries))
    return ','.join(rows)
def mag_to_flux(mag, zeropoint):
    """Convert a magnitude into a flux.

    From the magnitude definition m = -2.5*log10(F) + C it follows that
    F = 10**((C - m) / 2.5).

    :param mag: magnitude to be converted into a flux.
    :param zeropoint: zeropoint (in mags) of the magnitude system being used.
    :return: flux corresponding to the given magnitude.
    """
    exponent = (zeropoint - mag) / 2.5
    return 10 ** exponent
def make_init_message(*, dim, order, dt, t_final,
                      nstatus, nviz, cfl, constant_cfl,
                      initname, eosname, casename,
                      nelements=0, global_nelements=0):
    """Create a summary of some general simulation parameters and inputs."""
    lines = [
        f"Initialization for Case({casename})",
        "===",
        f"Num {dim}d order-{order} elements: {nelements}",
        f"Num global elements: {global_nelements}",
        f"Timestep: {dt}",
        f"Final time: {t_final}",
        f"CFL: {cfl}",
        f"Constant CFL: {constant_cfl}",
        f"Initialization: {initname}",
        f"EOS: {eosname}",
    ]
    # Trailing newline matches the original multi-line f-string.
    return "\n".join(lines) + "\n"
def lmean(inlist):
    """
    Return the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).
    Usage:   lmean(inlist)
    """
    # Builtin sum() replaces the manual accumulator (which shadowed ``sum``).
    return sum(inlist) / float(len(inlist))
def quote_remover(var):
    """
    Remove single and double quotes from a string and strip surrounding
    whitespace; any non-string input is returned unchanged.

    :param var: value to clean.
    :return: cleaned string, or ``var`` itself when it is not a string.
    """
    if isinstance(var, str):
        return var.replace("'", "").replace('"', '').strip()
    # Bug fix: the original used a bare ``return`` here, silently turning
    # every non-string input into None despite the documented pass-through.
    return var
def sieve(limit):
    """Return the list of prime numbers up to and including ``limit``.

    Implemented as a classic sieve of Eratosthenes. The original removed
    items from a list while iterating over it (skipping elements) and was
    O(n^2) due to repeated list.remove(); this version uses a flag array.
    """
    if limit < 2:
        return []
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if flags[p]:
            # Start at p*p: smaller multiples were crossed off already.
            for multiple in range(p * p, limit + 1, p):
                flags[multiple] = False
    return [n for n, is_prime in enumerate(flags) if is_prime]
def float_to_fp(signed, n_bits, n_frac):
    """Return a converter from floating point to saturating fixed point.

    The returned callable scales a float by ``2**n_frac``, truncates toward
    zero and clamps the result to the representable range: two's-complement
    limits when ``signed`` is True, otherwise [0, 2**n_bits - 1].

    >>> hex(int(float_to_fp(True, 8, 4)(0.5)))
    '0x8'
    >>> q34 = float_to_fp(False, 8, 4)  # Unsigned 4.4
    >>> hex(int(q34(-0.5)))
    '0x0'
    >>> hex(int(q34(16.0)))
    '0xff'

    Parameters
    ----------
    signed : bool
        Whether values may be negative, or are clipped at zero.
    n_bits : int
        Total number of bits in the fixed-point representation (including
        sign bit and fractional bits).
    n_frac : int
        Number of fractional bits in the fixed-point representation.
    """
    # Saturation limits for the chosen representation.
    if signed:
        upper = (1 << (n_bits - 1)) - 1
        lower = -(1 << (n_bits - 1))
    else:
        upper = (1 << n_bits) - 1
        lower = 0
    scale = 2.0 ** n_frac

    def convert(value):
        """Scale, truncate and saturate a single float ``value``."""
        raw = int(value * scale)
        return min(upper, max(lower, raw))
    return convert
import random
import string
def random_replicate_name(len=12):
    """Return a random alphanumeric string of length `len`.

    NOTE: the parameter name ``len`` shadows the builtin; it is kept for
    backward compatibility with existing callers.
    """
    # Bug fix: the original hand-typed alphabet omitted 's' and 'S'
    # ("...pqrtuv..."); use the full alphanumeric set from the stdlib.
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=len))
def hk_modes(hier_num):
    """
    Generate modes in the HK hierarchy.
    Parameters
    ----------
    hier_num : int
        Number in the HK hierarchy (hier_num = n means the nth model).
    Returns
    -------
    p_modes : list
        List of psi modes, represented as (horizontal, vertical) wavenumber
        tuples, sorted.
    t_modes : list
        List of theta modes, represented as tuples, sorted.
    Examples
    --------
    >>> p_modes, t_modes = hk_modes(1)
    >>> print(p_modes)
    [(0, 1), (1, 1)]
    >>> print(t_modes)
    [(0, 2), (1, 1)]
    >>> p_modes, t_modes = hk_modes(2)
    >>> print(p_modes)
    [(0, 1), (0, 3), (1, 1), (1, 2)]
    >>> print(t_modes)
    [(0, 2), (0, 4), (1, 1), (1, 2)]
    """
    p_modes = [(0, 1), (1, 1)]  # Base model
    t_modes = [(0, 2), (1, 1)]
    current_pair = (1, 1)
    # Add pairs of modes. Fills shells of constant L1 norm by increasing norm.
    # Within a shell, fills modes in descending lexicographical order.
    # When starting a new shell, also add the zero-horizontal-wavenumber modes.
    for _ in range(1, hier_num):
        if current_pair[1] == 1:
            # Start a new shell.
            level = current_pair[0] + 1
            current_pair = (1, level)
            p_modes.append((0, level * 2 - 1))
            t_modes.append((0, level * 2))
            # Bug fix: the first (1, level) pair of each new shell was never
            # recorded, so the returned lists fell short of the documented
            # output (see the hier_num=2 doctest above).
            p_modes.append(current_pair)
            t_modes.append(current_pair)
        else:
            current_pair = (current_pair[0] + 1, current_pair[1] - 1)
            p_modes.append(current_pair)
            t_modes.append(current_pair)
    p_modes.sort()
    t_modes.sort()
    return p_modes, t_modes
def _is_target_node(node: str) -> bool:
    """Check if it is valid target node in BEL.
    :param node: string representing the node
    :return: True when the node string denotes a biological process ('bp')
        or pathology ('path') entity
    """
    # startswith accepts a tuple of prefixes.
    return node.startswith(('bp', 'path'))
def contrast(img, threshold):
    """
    Binarize an image against a threshold: pixels <= threshold become 0,
    pixels above it become 255. Works element-wise on arrays and on scalars.
    """
    mask = img > threshold
    return mask * 255
def _intended_value(intended, unspecified, actual, name, msg):
    """
    Return the intended value if the actual value is unspecified or already
    valid, otherwise raise a ValueError with the given context message.

    Arguments:
      * `intended`: The intended value, or sequence of values. For a
        sequence, the first item is the default and the others are also
        accepted.
      * `unspecified`: A value indicating 'unspecified' (usually `None`).
      * `actual`: The actual value.
      * `name`: Attribute name, used in the exception message.
      * `msg`: Context-setting prefix for the exception message.
    """
    # Sequence form: first element is the default, all elements are valid.
    if isinstance(intended, (tuple, list)):
        if actual == unspecified:
            return intended[0]  # the default
        if actual in intended:
            return actual
        raise ValueError(msg + ", but specifies %s=%r (must be one of %r)"
                         % (name, actual, intended))
    # Scalar form: the single intended value is both default and only option.
    if actual == unspecified:
        return intended
    if actual == intended:
        return actual
    raise ValueError(msg + ", but specifies %s=%r (must be %r)"
                     % (name, actual, intended))
def cortical_contrast(mean_gm, mean_wm):
    """Calculate the vertex-wise cortical contrast.

    contrast = (mean WM - mean GM) / ((mean WM + mean GM) / 2)

    :type mean_gm: float
    :param mean_gm: The mean value of the gray matter voxels.
    :type mean_wm: float
    :param mean_wm: The mean value of the white matter voxels.
    :rtype: float
    :return: The cortical contrast value.
    """
    midpoint = (mean_wm + mean_gm) / 2
    return (mean_wm - mean_gm) / midpoint
def find_first(iterable, default=False, pred=None):
    """Return the first true value in the iterable.

    If no true value is found, return *default*. If *pred* is not None,
    return the first item for which pred(item) is true.
    """
    # filter(None, it) keeps truthy items; filter(pred, it) keeps matches.
    for candidate in filter(pred, iterable):
        return candidate
    return default
def model_set(model):
    """Convert a model from a dictionary representation to a set.

    Given a ``model`` mapping atoms to Boolean values, return the set of
    atoms mapped to ``True``.
    """
    # == True (rather than truthiness) mirrors the original semantics.
    return {atom for atom, truth in model.items() if truth == True}
import logging
def get_type(node):
    """Get xpath-node's type.(recursive function).
    Args:
        node: The xpath-node's root (a pyang statement object).
    Returns:
        return xpath-node's type name (str), or None when unavailable.
    """
    # Only YANG leaf/leaf-list statements carry a type.
    if node.keyword not in ['leaf','leaf-list']:
        return None
    type_stmt = node.search_one("type")
    if not type_stmt:
        logging.info("leaf %s has no type defination", node.arg)
        return None
    type_spec = type_stmt.i_type_spec
    if not type_spec:
        return None
    # leafrefs are resolved through their target node, recursively;
    # i_target_node is set by pyang's validation phase (may be absent
    # for unresolved references).
    if type_spec.name == "leafref":
        if hasattr(type_spec, "i_target_node"):
            target_node = type_spec.i_target_node
            return get_type(target_node)
        else:
            return None
    else:
        return type_stmt.i_type_spec.name
def dict_to_list(d):
    """Convert an (ordered) dict into a list of its values, sorted by key."""
    # Coerce to a real dict so this can also be used as an array_hook.
    d = dict(d)
    return [value for _key, value in sorted(d.items())]
def halt_after_time(time, at_node, node_data, max_time=100):
    """
    Halting predicate: True once ``time`` reaches ``max_time``.
    ``at_node`` and ``node_data`` are unused but kept for the expected
    callback signature.
    """
    return time >= max_time
def text_html_table(caption=None):
    """Return a text HtmlTable with a given caption for testing.

    Bug fix: when no caption was given, the f-string interpolated the
    literal string "None"; a missing caption is now omitted entirely.
    """
    caption_markup = f"<caption>{caption}</caption>" if caption else ""
    return f"""
    <table>
    {caption_markup}
    <tr></tr>
    <tr></tr>
    </table>
    """
def is_command(text):
    """
    Check whether `text` is a Telegram chat command (starts with '/').
    :param text: Text to check.
    :return: True/False, or None when `text` is None.
    """
    if text is None:
        return None
    return text.startswith('/')
from typing import List
from typing import Tuple
def longest_path(dir_str: str) -> str:
    """
    Find the longest path (in characters) to any dir/file in a directory
    listing where depth is encoded by leading tabs.
    :param dir_str: newline-separated entries of the directory structure
    :return: longest "a/b/c"-style path to any entry
    """
    def walk(entries: List[str], prefix: str) -> Tuple[str, int]:
        """Consume entries at the depth encoded by ``prefix``; return the
        longest sub-path rooted here and its character length."""
        nonlocal index
        best_len = 0
        best_path = ""
        while index < total:
            entry = entries[index]
            if not entry.startswith(prefix):
                break
            # Bug fix: the original used entry.lstrip(prefix), but lstrip
            # strips a *character set*, not a literal prefix; slice off
            # exactly len(prefix) characters instead.
            name = entry[len(prefix):]
            index += 1
            sub_path, sub_len = walk(entries, prefix + "\t")
            # +1 accounts for the '/' separator when a child exists.
            if sub_len + len(name) + 1 > best_len:
                if sub_len:
                    best_len = sub_len + len(name) + 1
                    best_path = name + "/" + sub_path
                else:
                    best_len = len(name)
                    best_path = name
        return best_path, best_len
    if not dir_str:
        return ""
    entries = dir_str.split("\n")
    index: int = 0
    total: int = len(entries)
    return walk(entries, "")[0]
def columns(thelist, n):
    """
    Break a list into ``n`` columns, filling up each row to the maximum
    equal length possible. Always returns exactly ``n`` columns, padding
    with empty lists when necessary. For example::

        >>> columns(range(10), 4)
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> columns(range(2), 3)
        [[0], [1], []]
    """
    try:
        n = int(n)
        thelist = list(thelist)
    except (ValueError, TypeError):
        # Uncoercible input: hand it back as a single "column".
        return [thelist]
    size, remainder = divmod(len(thelist), n)
    if remainder:
        size += 1
    return [thelist[size * i:size * (i + 1)] for i in range(n)]
from typing import List
from typing import Tuple
def configuration_key_intersection(*argv: List[Tuple[int, int]]
                                   ) -> List[Tuple[int, int]]:
    """Return the intersection of the passed configuration key lists.
    Args:
        *argv (list[(int, int)]): any number of configuration key lists to be \
            intersected
    Returns:
        list[(int, int)]: configuration keys found in every list passed.
    """
    common = set(argv[0])
    for keys in argv[1:]:
        common &= set(keys)
    return list(common)
from typing import List
def find_first_in_list(txt: str, str_list: List[str]) -> int:
    """Return the index of the earliest occurrence of any list item in a string.
    Ex: find_first_in_list('foobar', ['bar', 'fin']) -> 3. Returns -1 when
    no item occurs.
    """
    earliest = len(txt) + 1  # sentinel: beyond any valid index
    for needle in str_list:
        pos = txt.find(needle)
        if -1 < pos < earliest:
            earliest = pos
    return earliest if -1 < earliest < len(txt) + 1 else -1
import json
def from_json(config_file_path):
    """Read data from a json file.

    Ensures the file exists (creating it if not) and returns an empty dict
    instead of failing when the file holds no parsable data.
    :param config_file_path Path: Path representing the file-to-read.
    :returns dict: config data (or an empty dict if there was no data).
    """
    config_file_path.touch(exist_ok=True)
    try:
        with open(str(config_file_path.resolve()), "r") as handle:
            return json.load(handle)
    except json.JSONDecodeError:
        # Empty or unparsable file -> behave as "no data".
        return dict()
from typing import List
def calcCellDeltaList(cell: List[int]) -> List[int]:
    """Calculate the list of step sizes for a cell.
    Args:
        cell (list): List of bits in a cell (i.e. [0, 1, 2, 3])
    Returns:
        list: Step sizes between consecutive levels in the cell, where
        level ``i`` has value sum(2**cell[b] for each set bit b of i).
    """
    n = len(cell)
    deltas: List[int] = []
    previous = 0
    for level in range(1, 2 ** n):
        # Weight each set bit of ``level`` by the corresponding cell entry.
        value = 0
        for position in range(n):
            if (level >> position) & 1:
                value += 2 ** cell[position]
        deltas.append(value - previous)
        previous = value
    return deltas
def some(predicate, seq):
    """If some element x of seq satisfies predicate(x), return predicate(x);
    otherwise return False.

    >>> some(callable, [min, 3])
    True
    >>> some(callable, [2, 3])
    False
    """
    for result in map(predicate, seq):
        if result:
            return result
    return False
import re
def get_first_stacktrace(stderr_data):
    """If |stderr_data| contains sanitizer stack traces, return everything up
    to the end of the first one. Otherwise return the entire string."""
    # Non-greedy +? stops at the first ABORTING instead of the last.
    # Bug fix: the original class [A-z] also matches '[', '\\', ']', '^',
    # '_' and backtick; use the explicit [A-Za-z].
    sanitizer_stacktrace_regex = r'ERROR: [A-Za-z]+Sanitizer: .*\n(.|\n)+?ABORTING'
    match = re.search(sanitizer_stacktrace_regex, stderr_data)
    # If we can't find a stacktrace, return the whole thing.
    if match is None:
        return stderr_data
    return stderr_data[:match.end()]
import torch
def autograd_individual_gradients(X, y, forward_fn, parameters):
    """Compute individual gradients with a for-loop using autograd.
    Loop over data (xₙ, yₙ) and compute ∇ℓ(xₙ, yₙ) with respect to `parameters`,
    where ℓ is the forward function.
    Note:
        Individual gradients only make sense, if the summands in the loss
        depend on a single datum (xₙ, yₙ).
    Args:
        X (torch.Tensor): `(N, *)` batch of input data.
        y (torch.Tensor): `(N, ∘)` batch of input labels.
        forward_fn (callable): Function that computes the (individual) loss. Must have
            signature `loss = forward_fn(X, y)` and return a scalar tensor `loss`.
        parameters (list): List of parameters, used by `forward_fn` to compute the loss,
            that `require_grad` (and w.r.t. which gradients will be computed).
    Returns:
        list: Individual gradients for every parameter in `parameters`, arranged in the
            same order. Every item is of same shape as the associated parameter, with
            an additional leading dimension of size `N` (gradient for each sample).
    """
    N = X.shape[0]
    # One (N, *param.shape) buffer per parameter, on the data's device.
    individual_gradients = [torch.zeros(N, *p.shape).to(X.device) for p in parameters]
    for n in range(N):
        # unsqueeze(0) keeps the batch dimension (size 1) for forward_fn.
        x_n = X[n].unsqueeze(0)
        y_n = y[n].unsqueeze(0)
        l_n = forward_fn(x_n, y_n)
        # Fresh graph per sample; grad() returns one tensor per parameter.
        g_n = torch.autograd.grad(l_n, parameters)
        for param_idx, g in enumerate(g_n):
            individual_gradients[param_idx][n] = g
    return individual_gradients
import typing
def hex(b: typing.Optional[bytes]) -> str:
    """Convert optional bytes into a hex-encoded str; "" when falsy/None.

    Note: intentionally shadows the builtin ``hex`` in this module.
    """
    if b:
        return b.hex()
    return ""
def get_reserved_price(client, availability_zone, instance_type):
    """Gets price of a given reserved Linux instance type in a given availability zone.

    `client` is presumably a boto3 EC2 client -- TODO confirm against callers.
    Returns the recurring charge amount (float) of the first "No Upfront"
    Linux/UNIX offering; assumes at least one offering is returned
    (IndexError otherwise).
    """
    resp = client.describe_reserved_instances_offerings(
        InstanceType=instance_type,
        AvailabilityZone=availability_zone,
        OfferingType="No Upfront",
        ProductDescription="Linux/UNIX",
    )
    return float(resp["ReservedInstancesOfferings"][
        0]["RecurringCharges"][0]["Amount"])
def get_class_with_tablename(cls):
    """
    Returns the first parent found (or the class itself) for given class which
    has __tablename__ attribute set.
    This function is needed for slug uniqueness testing when using concrete
    inheritance.
    :param cls: class to inspect (an SQLAlchemy declarative class)
    """
    mapper_args = {}
    if hasattr(cls, '__mapper_args__'):
        mapper_args = cls.__mapper_args__
    # No inheritance declared: this class owns its own table.
    if 'inherits' not in mapper_args:
        return cls
    # Table name differs from the declared parent's: still this class.
    if cls.__tablename__ != mapper_args['inherits'].__tablename__:
        return cls
    # Otherwise walk up the bases recursively until a table owner is found.
    for parent in cls.__bases__:
        result = get_class_with_tablename(parent)
        if result:
            return result
    return None
from datetime import datetime
def convert_date(measured_time):
    """
    Convert a provider-API date ('YYYY-MM-DD HH:MM:SS') to the InfluxDB
    format, e.g. '2019-01-31T19:25:00Z'.
    :param measured_time: date string from the provider API
    :return: ISO-like date string with 'T' separator and 'Z' suffix
    """
    parsed = datetime.strptime(measured_time, "%Y-%m-%d %H:%M:%S")
    return parsed.strftime("%Y-%m-%dT%H:%M:%SZ")
def split_train_test(X, Y, trs_len=0.80):
    """
    Split both X and Y into train and test sets.
    trs_len - fraction of the data used for training (default 0.80);
    the remainder is used for testing.
    """
    # Cutoff keeps one example fewer than the exact fraction (historical).
    cutoff = int(len(X) * trs_len) - 1
    return X[:cutoff], Y[:cutoff], X[cutoff:], Y[cutoff:]
def pair_right(f):
    """Return a function that maps x to the tuple (x, f(x)).

    >>> add_one = pair_right(lambda x: x + 1)
    >>> add_one(3)
    (3, 4)
    """
    def paired(x):
        return x, f(x)
    return paired
def npqt(fmp, f0p, fmf0=4.88):
    """Calculate NPQt = (fmf0 / ((fmp / f0p) - 1)) - 1.

    :param fmp: Fm'
    :param f0p: F0'
    :param fmf0: Fv/Fm (default: 4.88)
    :returns: NPQt (float)
    """
    fv_ratio = fmp / f0p - 1
    return fmf0 / fv_ratio - 1
def parse_arguments(parser):
    """Read user arguments

    Registers the evaluation CLI options on ``parser`` and returns the
    parsed namespace. NOTE: parse_args() reads sys.argv, so this must be
    called from a script entry point.
    """
    parser.add_argument('--path_model',
                        type=str, default='Model/weights.01.hdf5',
                        help='Path to the model to evaluate')
    parser.add_argument('--path_data', type=str, default='data/data_test.pkl',
                        help='Path to evaluation data')
    parser.add_argument('--path_target', type=str, default='data/target_test.pkl',
                        help='Path to evaluation target')
    # store_false: flag PRESENCE disables graph output (default True).
    parser.add_argument('--omit_graphs', action='store_false',
                        help='Does not output graphs if argument is present')
    parser.add_argument('--n_steps', type=int, default=300,
                        help='Maximum number of visits after which the data is truncated')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size for prediction (higher values are generally faster)')
    parser.add_argument('--multigpu', action='store_true',
                        help='If argument is present enable using mutiple gpu')
    args = parser.parse_args()
    return args
from datetime import datetime
def convert_datetime(timestamp: int) -> datetime:
    """Convert a java milliseconds timestamp from the ToonAPI to a naive
    UTC datetime (millisecond precision)."""
    seconds, millis = divmod(timestamp, 1000)
    return datetime.utcfromtimestamp(seconds).replace(microsecond=millis * 1000)
import re
def ParseRevision(lines):
    """Parse the revision number out of the raw lines of the patch.
    Returns 0 (new file) if no revision number was found.
    """
    # e.g. "--- foo/bar.py\t(revision 4321)"; only the first 10 lines are
    # scanned, and scanning stops at the first hunk header ('@@ ...').
    pattern = re.compile(r'---\s.*\(.*\s(\d+)\)\s*$')
    for line in lines[:10]:
        if line.startswith('@'):
            break
        found = pattern.match(line)
        if found:
            return int(found.group(1))
    return 0
from datetime import datetime
def dicom_strfdate(dt: datetime) -> str:
    """
    Format a datetime as a DICOM DA value (YYYYMMDD).
    """
    return f"{dt:%Y%m%d}"
def get_depth(phylo_tree):
    """
    Return the depth of a tree: the maximum root-to-terminal path length
    over all terminal nodes (0 for a tree with no terminals).
    """
    return max(
        (len(phylo_tree.get_path(target=node))
         for node in phylo_tree.get_terminals(order='preorder')),
        default=0,
    )
import requests
def get_response_from_url(url):
    """Returns the Hue API response to a URL as json."""
    # NOTE(review): no timeout is set and HTTP errors are not checked --
    # consider requests.get(url, timeout=...) plus raise_for_status().
    response = requests.get(url).json()
    return response
def check_neighboring_blocks(block, grid):
    """
    Given a block, return the immediate (4-connected) neighbors of the block.
    Parameters
    ----------
    block : tuple
        The row and column of a block (row, col)
    grid : ndarray
        The numpy 2D array containing blocks (1) and empty space (0)
    Returns
    -------
    neighbors : list of tuple (row, col)
        Immediate neighbors of the given block, in up/left/down/right order.
    """
    # Fix: bounds were hard-coded to a 128x128 grid (i < 127); derive them
    # from the grid's actual shape so any grid size works. Behavior on
    # 128x128 grids is unchanged.
    rows, cols = grid.shape
    neighbors = []
    i, j = block
    if i >= 1 and grid[i - 1, j] == 1:
        neighbors.append((i - 1, j))
    if j >= 1 and grid[i, j - 1] == 1:
        neighbors.append((i, j - 1))
    if i < rows - 1 and grid[i + 1, j] == 1:
        neighbors.append((i + 1, j))
    if j < cols - 1 and grid[i, j + 1] == 1:
        neighbors.append((i, j + 1))
    return neighbors
def left_of_line(point, p1, p2):
    """Return True if ``point`` lies to the left of the directed line p1 -> p2."""
    if p1[0] == p2[0]:
        # Vertical line: which side counts as "left" depends on direction.
        going_up = p1[1] < p2[1]
        if going_up:
            return point[0] < p1[0]
        return point[0] > p1[0]
    # General case: compare the point's y with the line's y at that x.
    slope = (p2[1] - p1[1]) / (p2[0] - p1[0])
    line_y = slope * (point[0] - p1[0]) + p1[1]
    going_right = p1[0] < p2[0]
    if going_right:
        return point[1] > line_y
    return point[1] < line_y
def distinct_words(corpus):
    """ Determine a list of distinct words for the corpus.
        Params:
            corpus (list of list of strings): corpus of documents
        Return:
            corpus_words (list of strings): sorted list of distinct words
            num_corpus_words (integer): number of distinct words
    """
    # Flatten, dedupe and sort in one pass via a set comprehension.
    vocabulary = sorted({word for document in corpus for word in document})
    return vocabulary, len(vocabulary)
def extractFeatures(pages, dataset):
    """
    Extract the amount of page views for each student for each page.

    :param pages: list of all the (unique) pages to use
    :param dataset: list of dicts, one per student, each with at least the
        key "pages" (and optionally "score")
    :returns: dict with keys "scores" (list of scores, one per student) and
        "features" (dict mapping page -> per-student view counts, 0 when
        a student never visited the page)
    """
    scores = [student.get("score") for student in dataset]
    features = {
        page: [student["pages"].get(page, 0) for student in dataset]
        for page in pages
    }
    return {"scores": scores, "features": features}
import torch
def rmse(hat_y, y):
    """RMSE
    Args:
        hat_y: predicted values
        y: ground-truth values
    Return:
        ('rmse', rmse): metric name and the scalar result tensor
    """
    error = y - hat_y
    value = torch.mean(error ** 2).sqrt()
    return 'rmse', value
from typing import Tuple
def manhattan_distance(point1: Tuple[int, int], point2: Tuple[int, int]) -> int:
    """Calculate and return the Manhattan distance between two points.
    :param point1: first point
    :param point2: second point
    :return: Manhattan distance between the two points
    """
    return sum(abs(a - b) for a, b in zip(point1, point2))
def get_dofs(data):
    """
    Return the number of target DOFs (i.e. the number of different motion
    classes required to complete a trial).
    """
    poses = data['target']['pose']
    return len(poses)
def precompute_idfs(wglobal, dfs, total_docs):
    """Pre-compute the inverse document frequency mapping for all terms.

    Parameters
    ----------
    wglobal : function
        Custom "global" weighting function, called as wglobal(df, total_docs).
    dfs : dict
        Maps `term_id` to the number of documents the term appeared in.
    total_docs : int
        Total number of documents.

    Returns
    -------
    dict of (int, float)
        `{term_id: idf}` for every term in `dfs`.
    """
    # Not strictly necessary (could be computed lazily); this just speeds
    # up repeated lookups a little.
    idfs = {}
    for term_id, doc_freq in dfs.items():
        idfs[term_id] = wglobal(doc_freq, total_docs)
    return idfs
def parse_tensor_name_with_slicing(in_str):
    """Parse tensor name, potentially suffixed by slicing string.

    Args:
      in_str: (str) Input name of the tensor, potentially followed by a slicing
        string. E.g.: Without slicing string: "hidden/weights/Variable:0", with
        slicing string: "hidden/weights/Variable:0[1, :]"

    Returns:
      (str) name of the tensor
      (str) slicing string, if any. If no slicing string is present, return "".
    """
    # A slicing suffix is recognized only when there is exactly one '['
    # and the string ends with ']'.
    has_slicing = in_str.count("[") == 1 and in_str.endswith("]")
    if has_slicing:
        bracket_pos = in_str.index("[")
        return in_str[:bracket_pos], in_str[bracket_pos:]
    return in_str, ""
def get_runtime_and_maxRAM(dataset):
    """Return runtime in hours and max RAM in GB.

    Reads ``<dataset>/<dataset>.log``, a whitespace-separated key/value file
    (lines starting with '=' are skipped), and extracts the ``ru_utime``
    (seconds, e.g. "123.4s") and ``maxvmem`` (e.g. "2.5GB") entries.
    """
    logfile = dataset + "/" + dataset + ".log"
    entries = {}
    with open(logfile) as f:
        for line in f:
            # Skip decorative separator lines.
            if line.startswith("="):
                continue
            key, val = line.split()[0:2]
            entries[key] = val
    runtime_s = float(entries["ru_utime"].split("s")[0])
    max_RAM = float(entries["maxvmem"].split("GB")[0])
    # Convert seconds to hours, rounded to 3 decimal places.
    runtime = round(runtime_s / (60 * 60), 3)
    return runtime, max_RAM
import hashlib
def double_sha256(string, as_hex=False):
    """
    Get double SHA256 hash of string

    :param string: String to be hashed
    :type string: bytes
    :param as_hex: Return value as hexadecimal string. Default is False
    :type as_hex: bool

    :return bytes, str:
    """
    inner = hashlib.sha256(string).digest()
    outer = hashlib.sha256(inner)
    return outer.hexdigest() if as_hex else outer.digest()
def lower_dict_keys(some_dict):
    """Return a copy of *some_dict* with every key lowercased.

    Keys without a ``lower`` method (e.g. ints) are kept unchanged.
    """
    lowered = {}
    for key, value in some_dict.items():
        try:
            lowered[key.lower()] = value
        except AttributeError:
            # Non-string key -- keep as-is.
            lowered[key] = value
    return lowered
import time
import math
def time_since(since):
    """
    Calculate processed time in min and sec
    :param since: time data when processing started
    :return: string to show min and sec
    """
    elapsed = time.time() - since
    minutes = math.floor(elapsed / 60)
    seconds = elapsed - minutes * 60
    return '%dm %ds' % (minutes, seconds)
def add_jekyll_header(html_str, layout, title, description):
    """
    Add the Jekyll header to the html strings.

    Args:
        html_str (str): HTML of converted notebook.
        layout (str): Jekyll layout to use.
        title (str): Title to use.
        description (str): Description to use

    Returns:
        (str): HTML with Jekyll layout header on top.
    """
    header = (
        '---\n'
        'layout: {}\n'
        'title: {}\n'
        'description: {}\n'
        '---\n'
    ).format(layout, title, description)
    # Blank line between the front-matter block and the body.
    return header + '\n' + html_str
from datetime import datetime
def parse_iso8601(t):
    """Return datetime from ISO8601 string."""
    fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    return datetime.strptime(t, fmt)
import re
def get_first_line_text_from_html(html_string):
    """reduce html to the first line of text and strip all html tags
    """
    if html_string is None:
        return None
    # Block-level tags (p, div, br) become newlines so the "first line"
    # boundary is preserved; everything else is stripped outright.
    block_tags = re.compile(r"</?(p|div|br).*?>",
                            re.IGNORECASE | re.DOTALL)
    text = block_tags.sub("\n", html_string)
    text = re.sub('<[^<]+?>', '', text)
    return text.split('\n', 1)[0]
def cskp(self, kcn="", kcs="", porig="", pxaxs="", pxypl="", par1="",
         par2="", **kwargs):
    """Defines a local coordinate system by three keypoint locations.

    APDL Command: CSKP

    Parameters
    ----------
    kcn
        Arbitrary reference number assigned to this coordinate system.
        Must be greater than 10.  A coordinate system previously defined
        with this number will be redefined.

    kcs
        Coordinate system type: 0 or CART (Cartesian), 1 or CYLIN
        (cylindrical, circular or elliptical), 2 or SPHE (spherical or
        spheroidal), 3 or TORO (toroidal).

    porig
        Keypoint defining the origin of this coordinate system.  If
        PORIG = P, graphical picking is enabled and all remaining command
        fields are ignored (valid only in the GUI).

    pxaxs
        Keypoint defining the positive x-axis orientation of this
        coordinate system.

    pxypl
        Keypoint defining the x-y plane (with PORIG and PXAXS) in the
        first or second quadrant of this coordinate system.

    par1
        Used for elliptical, spheroidal, or toroidal systems.  If KCS = 1
        or 2, PAR1 is the ratio of the ellipse Y-axis radius to X-axis
        radius (defaults to 1.0, circle).  If KCS = 3, PAR1 is the major
        radius of the torus.

    par2
        Used for spheroidal systems.  If KCS = 2, PAR2 = ratio of ellipse
        Z-axis radius to X-axis radius (defaults to 1.0, circle).

    Notes
    -----
    Defines and activates a local right-handed coordinate system by
    specifying three existing keypoints: to locate the origin, to locate
    the positive x-axis, and to define the positive x-y plane.  This local
    system becomes the active coordinate system.  See the CLOCAL, CS,
    CSWPLA, and LOCAL commands for alternate definitions.  Local
    coordinate systems may be displayed with the /PSYMB command.

    This command is valid in any processor.
    """
    # Assemble the comma-separated APDL command string.
    fields = (kcn, kcs, porig, pxaxs, pxypl, par1, par2)
    command = "CSKP," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
import re
def structure_from_viewer(status, atlas_layer, atlas):
"""
Get brain region info from mouse position in napari viewer.
Extract nx3 coordinate pair from napari window status string.
Return brainglobe (BG) structure number, name, hemisphere, and a
"pretty" string that can be displayed for example in the status bar.
Parameter
---------
status : str, Napari viewer status (napari.viewer.Viewer.status)
atlas_layer : Napari viewer layer
Layer, which contains the annotation / region
information for every structure in the (registered)
atlas
atlas : Brainglobe atlas (bg_atlasapi.bg_atlas.BrainGlobeAtlas)
Returns
-------
If any error is raised, (None,None,None,"") is returned
structure_no : int
BG Structure number
Returns none if not found
structure : str
Structure name
Returns none if not found
hemisphere : str
Hemisphere name
Returns none if not found
region_info : str
A string containing info about structure
and hemisphere
Returns empty string if not found
"""
# Using a regex, extract list of coordinates from status string
assert hasattr(atlas_layer, "data"), "Atlas layer appears to be empty"
assert (
atlas_layer.data.ndim == 3
), f'Atlas layer data does not have the right dim ("{atlas_layer.data.ndim}")'
try:
coords = re.findall(r"\[\d{1,5}\s+\d{1,5}\s+\d{1,5}\]", status)[0][
1:-1
]
coords_list = coords.split()
map_object = map(int, coords_list)
coord_list = tuple(map_object)
except (IndexError, ValueError):
# No coordinates could be extracted from status
return None, None, None, ""
# Extract structure number
try:
structure_no = atlas_layer.data[coord_list]
except IndexError:
return None, None, None, ""
if structure_no in [0]: # 0 is "Null" region
return None, None, None, ""
# Extract structure information
try:
structure = atlas.structures[structure_no]["name"]
except KeyError:
return None, None, None, ""
# ... and make string pretty
region_info = []
for struct in structure.split(","):
region_info.append(struct.strip().capitalize())
hemisphere = atlas.hemisphere_from_coords(
coord_list, as_string=True
).capitalize()
region_info.append(hemisphere)
region_info = " | ".join(region_info)
return structure_no, structure, hemisphere, region_info | a0f92a90cf13b1bc01081167f95c248a1b74c046 | 690,208 |
def altera_caractere(s, c, i):
    """
    Replace one character in a string.

    :param s: str; word to be edited
    :param c: chr; replacement character
    :param i: int; index of the character to be replaced
    :return: str
    """
    chars = list(s)
    chars[i] = c
    return ''.join(chars)
def normalize_context_key(string):
    """Normalize context keys

    Uppercases the first character and removes all spaces.

    Args:
        string (str):

    Returns:
        Normalized string
    """
    capitalized = string[:1].upper() + string[1:]
    return capitalized.replace(" ", "")
def cli(ctx, history_id, dataset_id, follow=False):
    """Get details related to how dataset was created (``id``, ``job_id``, ``tool_id``, ``stdout``, ``stderr``, ``parameters``, ``inputs``, etc...).

    Output:

        Dataset provenance information
        For example::

          {'id': '6fbd9b2274c62ebe',
           'job_id': '5471ba76f274f929',
           'parameters': {...},
           'stderr': '',
           'stdout': '',
           'tool_id': 'toolshed.g2.bx.psu.edu/repos/ziru-zhou/macs2/modencode_peakcalling_macs2/2.0.10.2',
           'uuid': '5c0c43f5-8d93-44bd-939d-305e82f213c6'}
    """
    # Thin CLI wrapper around the bioblend histories client.
    histories = ctx.gi.histories
    return histories.show_dataset_provenance(history_id, dataset_id, follow=follow)
import six
def safe_utf8(string):
    """Returns bytes on Py3 and an utf-8 encoded string on Py2."""
    # On Python 3 a str is already the right type; only Py2 needs encoding.
    return string.encode("utf-8") if six.PY2 else string
def clip_colours(colour_value):
    """ This function ensures that our auto white balance module does not
    exceed the limits of our RGB spectrum.

    :param colour_value: The value of our colour channel.
    :return: The normalised value of our colour channel, clamped to
        [0, 255] and rounded to a whole number.
    """
    if colour_value <= 0:
        # Value of 0 is absolute black and cannot go lower.
        # BUG FIX: previously assigned to an unused local `value`, so
        # negative inputs were returned un-clamped.
        colour_value = 0
    elif colour_value >= 255:
        # Value of 255 is absolute white and cannot go higher.
        colour_value = 255
    # Value must be whole number.
    return round(colour_value)
import math
def mu_a(x, alpha_L=0.1, alpha_R=0.1):
"""
Calculates the asymetric alpha-trimmed mean
"""
# sort pixels by intensity - for clipping
x = sorted(x)
# get number of pixels
K = len(x)
# calculate T alpha L and T alpha R
T_a_L = math.ceil(alpha_L*K)
T_a_R = math.floor(alpha_R*K)
# calculate mu_alpha weight
weight = (1/(K-T_a_L-T_a_R))
# loop through flattened image starting at T_a_L+1 and ending at K-T_a_R
s = int(T_a_L+1)
e = int(K-T_a_R)
val = sum(x[s:e])
val = weight*val
return val | c72f07c43ada924104d9d0eefc75d0788929bbfd | 690,220 |
import torch
def get_relative_position_matrix(length, max_relative_position, direction, offset=True):
    """ Generate matrix of relative positions between inputs ([..., length])."""
    positions = torch.arange(length).long()
    if torch.cuda.is_available():
        positions = positions.cuda()
    # distance[i][j] = i - j
    row = positions[:, None].expand(length, length)
    col = positions[None, :].expand(length, length)
    distance = row - col
    if max_relative_position is None:
        clipped = distance
    else:
        clipped = distance.clamp(-max_relative_position, max_relative_position)
    if not direction:
        # Undirected: keep only the magnitude of the relative offset.
        return clipped.abs()
    if offset and max_relative_position is not None:
        # Shift values to be >= 0. Each integer still uniquely identifies a
        # relative position difference.
        return clipped + max_relative_position
    return clipped
import yaml
def read_config(fname):
    """Read configuration file."""
    with open(fname, 'r') as stream:
        # safe_load is the documented shorthand for load(..., SafeLoader).
        return yaml.safe_load(stream)
def graph6n(data):
    """Read initial one or four-unit value from graph6 sequence. Return value, rest of seq."""
    first = data[0]
    # Small values (<= 62) are encoded in a single unit.
    if first <= 62:
        return first, data[1:]
    # Otherwise the next three units hold an 18-bit value.
    value = (data[1] << 12) + (data[2] << 6) + data[3]
    return value, data[4:]
def _make_emm_plugin_finalizer(handle, allocations):
"""
Factory to make the finalizer function.
We need to bind *handle* and *allocations* into the actual finalizer, which
takes no args.
"""
def finalizer():
"""
Invoked when the MemoryPointer is freed
"""
# At exit time (particularly in the Numba test suite) allocations may
# have already been cleaned up by a call to Context.reset() for the
# context, even if there are some DeviceNDArrays and their underlying
# allocations lying around. Finalizers then get called by weakref's
# atexit finalizer, at which point allocations[handle] no longer
# exists. This is harmless, except that a traceback is printed just
# prior to exit (without abnormally terminating the program), but is
# worrying for the user. To avoid the traceback, we check if
# allocations is already empty.
#
# In the case where allocations is not empty, but handle is not in
# allocations, then something has gone wrong - so we only guard against
# allocations being completely empty, rather than handle not being in
# allocations.
if allocations:
del allocations[handle]
return finalizer | 682378d6963bf924b77872c2ddf68105c90384b0 | 690,235 |
def ps_weight_timemean(field, ps):
    """
    This takes the surface pressure time mean of atmos_fields

    input:
        field  xr.DataArray or xr.Dataset
        ps     surface pressure field with the same dimensions as field,
               it does not need the vertical coordinates
    return
        same structure as field but time averaged
    """
    weighted_mean = (field * ps).mean('time')
    return weighted_mean / ps.mean('time')
def cite_key(extracted_cite):
    """Get a hashed key to represent a given ExtractedCite object, to check whether a new one is redundant."""
    e = extracted_cite
    # Mutable fields (lists/dicts) are converted to hashable equivalents;
    # falsy values collapse to None so {} and None key identically.
    key_parts = (
        e.cite,
        e.normalized_cite,
        e.rdb_cite,
        e.rdb_normalized_cite,
        e.reporter,
        e.category,
        e.cited_by_id,
        e.target_case_id,
        tuple(e.target_cases) if e.target_cases else None,
        frozenset(e.groups.items()) if e.groups else None,
        frozenset(e.metadata.items()) if e.metadata else None,
        tuple(frozenset(p.items()) for p in e.pin_cites) if e.pin_cites else None,
        e.weight,
        e.year,
    )
    return hash(key_parts)
from functools import reduce
from operator import mul
def doubleFactorial(n):
    """
    Returns the double factorial n!! of an integer.

    For n <= 0 the empty-product convention gives 1 (0!! == (-1)!! == 1).
    The previous implementation raised TypeError for n <= 0 because
    reduce() was called on an empty range without an initializer.
    """
    return reduce(mul, range(n, 0, -2), 1)
from typing import Dict
def delete_api_gateway(api_gateway_client, api_gateway_id: str) -> Dict:
    """
    Delete the API Gateway given ID.

    Args:
        api_gateway_client: API Gateway V2 Client.
        api_gateway_id: API Gateway ID to delete.

    Returns: The delete_api API response dict.
    """
    response = api_gateway_client.delete_api(ApiId=api_gateway_id)
    return response
def cal_weight(from_x, from_y, to_x, to_y):
    """
    calculate distance (Euclidean)

    Args:
        from_x: x coordinate
        from_y: y coordinate
        to_x: x coordinate
        to_y: y coordinate

    Returns:
        distance
    """
    dx = from_x - to_x
    dy = from_y - to_y
    # return abs(dx) + abs(dy)  # manhattan
    return (dx * dx + dy * dy) ** 0.5
def convert_datetimes_to_seconds(start_date, datetimes):
    """ Converts difference in datetimes to total elapsed seconds.

    Parameters
    ----------
    start_date : datetime object
        Start date to use for calculating total elapsed seconds.
    datetimes : list of datetimes
        List of datetimes to calculate total elapsed seconds for.

    Returns
    -------
    list of float
        Total elapsed seconds from start_date for each element in datetimes list.
    """
    elapsed = []
    for dt in datetimes:
        elapsed.append((dt - start_date).total_seconds())
    return elapsed
import ast
def is_name_in_ptree(name, ptree):
    """
    Return True if an ast.Name node with the given name as its id
    appears anywhere in the ptree, False otherwise
    """
    if not ptree:
        return False
    return any(
        isinstance(node, ast.Name) and node.id == name
        for node in ast.walk(ptree)
    )
def get_location(http_info):
    """Extract the redirect URL from a pysaml2 http_info object"""
    # Expect exactly one header: ('Location', <url>).
    assert 'headers' in http_info
    headers = http_info['headers']
    assert len(headers) == 1
    name, value = headers[0]
    assert name == 'Location'
    return value
def modus_ponens(p, q):
    """Implements the modus ponens logic table: p -> q"""
    # Implication is vacuously true when the antecedent is false.
    return q if p else not p
def interval_idx(x, xi):
    r"""
    Given a grid of points xi (sorted smallest to largest) find in
    which interval the point x sits.  Returns i, where x \in [ xi[i],xi[i+1] ].
    Raise ValueError if x is not inside the grid.
    """
    # BUG FIX: `assert cond, ValueError(...)` raised AssertionError (with the
    # ValueError instance as its message) instead of the documented ValueError,
    # and disappeared entirely under `python -O`.  Raise explicitly instead.
    if not (xi[0] <= x <= xi[-1]):
        raise ValueError('x=%g not in [%g,%g]' % (x, xi[0], xi[-1]))
    for i in range(len(xi) - 1):
        if xi[i] <= x <= xi[i + 1]:
            return i
def atResolution(rect):
    """
    Returns true iff the rectangle described is at resolution
    """
    # NOTE(review): placeholder implementation -- always reports True and
    # ignores `rect` entirely; confirm whether a real check is still pending.
    return True
def _isSubsequenceContained(subSequence, sequence):# pragma: no cover
"""
Checks if the subSequence is into the sequence and returns a tuple that
informs if the subsequence is into and where. Return examples: (True, 7),
(False, -1).
"""
n = len(sequence)
m = len(subSequence)
for i in range(n-m+1):
equal = True
for j in range(m):
equal = subSequence[j] == sequence[i+j]
if not equal:
break
if equal:
return True, i
return False, -1 | aa064fd1017342e0d980aadba69a1929f02a1e8f | 690,264 |
def get_service(v1, service):
    """Get service spec for service"""
    selector = "metadata.name=%s" % service
    return v1.list_service_for_all_namespaces(watch=False, field_selector=selector)
def MakeSpecificSKUPropertiesMessage(messages, instance_properties,
                                     total_count):
    """Constructs a specific sku properties message object."""
    props_cls = messages.FutureReservationSpecificSKUProperties
    return props_cls(totalCount=total_count,
                     instanceProperties=instance_properties)
def gl_add_user_group_project(gl_project_group, gl_user, gl_access_level):
    """
    Adds a Gitlab user to a Gitlab project or group at the given access level

    :param gl_project_group: A Project or Group object
    :param gl_user: A User instance
    :param gl_access_level: A gitlab.Access_Level. Can be gitlab.GUEST_ACCESS, gitlab.REPORTER_ACCESS,
        gitlab.DEVELOPER_ACCESS, gitlab.MASTER_ACCESS, or gitlab.OWNER_ACCESS (group only).
    :return: A handle to the member instance. Use this to adjust properties of the member.
    """
    member_attrs = {
        'user_id': gl_user.id,
        'access_level': gl_access_level
    }
    return gl_project_group.members.create(member_attrs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.