content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def mtci_vi(imgData, wave, mask=0, bands=[-1,-1,-1]):
"""
Function that calculates the MERIS Terrestrial Chlorophyll Index.
This functions uses wavelengths 753.75, 708.75, and 681.25 nm. The closest bands to these values will be used.
Citation: Dash, J. and Curran, P.J. 2004. The MERIS terrestrial chlo... | b1f88d2041d8cf9fa645316b47db472e3626f1f8 | 9,906 |
def GetSourceFile(file, sourcepath):
"""Return a relative file if it is embedded in a path."""
for root in sourcepath:
if file.find(root) == 0:
prefix_length = len(root)
if not root.endswith('/'):
prefix_length += 1
relative_file = file[prefix_length:]
return relative_file
retu... | b241497131c3595f78ebf9d1481c8d9d50887e5a | 9,907 |
def refine_gene_list(adata, layer, gene_list, threshold, return_corrs=False):
"""Refines a list of genes by removing those that don't correlate well with the average expression of
those genes
Parameters
----------
adata: an anndata object.
layer: `str` or None (default: `None`)
... | 0b26b5265bf62a6f771bb762cd3c497fa628c5c3 | 9,910 |
def shape_to_coords(value, precision=6, wkt=False, is_point=False):
"""
Convert a shape (a shapely object or well-known text) to x and y coordinates
suitable for use in Bokeh's `MultiPolygons` glyph.
"""
if is_point:
value = Point(*value).buffer(0.1 ** precision).envelope
... | 1b585f6bb9831db63b2e0e8c52b6fb29ba0d9ab9 | 9,911 |
def max_union(map_list):
"""
Element-wise maximum of the union of a list of HealSparseMaps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to compute the maximum of
Returns
-------
result : `HealSparseMap`
Element-wise maximum of maps
... | 169fef50486e22468f8942f6968630e8fdef6648 | 9,913 |
def getDtypes(attributes, forecastHorizon):
"""
Auxillary function to generate dictionary of datatypes for data queried from dynamo.
Parameters
----------
attributes : list,
Attributes queried from dynamo.
forecastHorizon : integer,
Number of forecast horizons which have been qu... | 4974b7fe8107b36556da41173508c908785ddf5f | 9,914 |
def cazy_synonym_dict():
"""Create a dictionary of accepted synonms for CAZy classes."""
cazy_dict = {
"Glycoside Hydrolases (GHs)": ["Glycoside-Hydrolases", "Glycoside-Hydrolases", "Glycoside_Hydrolases", "GlycosideHydrolases", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE_HYDROLASES", "GL... | 0d635075901cc3e6ba7b432c68e5be3f7d2c34d6 | 9,915 |
def new_oauth2ProviderLimited(pyramid_request):
"""this is used to build a new auth"""
validatorHooks = CustomValidator_Hooks(pyramid_request)
provider = oauth2_provider.OAuth2Provider(
pyramid_request,
validator_api_hooks=validatorHooks,
validator_class=CustomValidator,
serv... | ef15f43dfa0549431931210d788fd8ccde611634 | 9,916 |
def rand_color(red=(92, 220), green=(92, 220), blue=(92, 220)):
    """Generate a random hex color string such as ``#a3f07c``.

    Each channel argument is a ``(low, high)`` tuple constraining the
    random byte drawn for that channel (full range is 0..255).
    """
    channels = (rand_byte(red), rand_byte(green), rand_byte(blue))
    return "#" + "".join(f"{value:02x}" for value in channels)
import base64
def tile_to_html(tile, fig_size=None):
""" Provide HTML string representation of Tile image."""
b64_img_html = '<img src="data:image/png;base64,{}" />'
png_bits = tile_to_png(tile, fig_size=fig_size)
b64_png = base64.b64encode(png_bits).decode('utf-8').replace('\n', '')
return b64_im... | 9e22304c9ee44a850e17930088b0fc81b390fded | 9,918 |
def generate_buchwald_hartwig_rxns(df):
"""
Converts the entries in the excel files from Sandfort et al. to reaction SMILES.
"""
df = df.copy()
fwd_template = '[F,Cl,Br,I]-[c;H0;D3;+0:1](:[c,n:2]):[c,n:3].[NH2;D1;+0:4]-[c:5]>>[c,n:2]:[c;H0;D3;+0:1](:[c,n:3])-[NH;D2;+0:4]-[c:5]'
methylaniline = '... | 80351743c2f651965735f38b514d7af017fc25ce | 9,919 |
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.itk import MultiApplyTransforms
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.nibabel import GenerateS... | 5953ae62d40002283b41b4289fc45b96b50e319c | 9,920 |
def _get_indent(node):
    """Determine the indentation level of ``node``.

    Walks upward from ``node`` through its ancestors and returns the value
    of the first INDENT token found in a subtree, or ``None`` when no
    ancestor contains one (or ``node`` itself is falsy).
    """
    current = node
    while current:
        token = find_first(current, TOKEN.INDENT)
        if token is not None:
            return token.value
        current = current.parent
    return None
def distancesarr(image_centroid, object_centroids):
    """Return the distance from the image centroid to each object centroid.

    Delegates the per-object computation to ``centroid_distance``, which is
    handed the full centroid collection plus the current index.
    """
    return [
        centroid_distance(image_centroid, object_centroids, idx)
        for idx, _row in enumerate(object_centroids)
    ]
def obs_agent_has_neighbour(agent_id: int, factory: Factory) -> np.ndarray:
"""Does this agent have a neighbouring node?"""
agent: Table = factory.tables[agent_id]
return np.asarray(
[
agent.node.has_neighbour(Direction.up),
agent.node.has_neighbour(Direction.right),
... | d91b4d7eabcac6ed71149ad9220c2594e5054e36 | 9,923 |
def P_split_prob(b):
"""Returns the probability of b according to the P_split() distribution.
"""
"""n = b.length
if n <= 2:
p = 1.0
else:
k = 1
# si el arbol es binario y n > 2 seguro que tiene que ser splittable.
#while k < n and not b.splittable(k):
while n... | 94577a96e926686107a154aa82d55ceef6b9ab24 | 9,924 |
def t():
    """Or time(). Returns the number of seconds elapsed since the cartridge was run.

    Reads the module-level ``begin`` timestamp set at startup; no ``global``
    declaration is needed because ``begin`` is only read, never assigned.
    """
    return py_time.time() - begin
from typing import Any
def convert_none(
key: str, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False
) -> str:
"""Converts a null value into an XML element"""
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(None)
attrstring = make_attrstr... | c04efd6ed52cb092d6987f627b7222668da32dfd | 9,928 |
def is_title(ngram, factor = 2.0):
"""
Define the probability of a ngram to be a title.
Factor is for the confidence coex max.
"""
confidence = 1
to_test = [n for n in ngram if n not in stop_words]
for item in to_test:
if item.istitle(): confidence += factor / len(to_test)
# p... | 678959cdafc966d05b5ef213b0727799f20a8e0f | 9,929 |
def ul(microliters):
    """Unicode function name for creating microliter volumes.

    A string containing ``:`` is treated as a full unit expression and
    converted to microliters; anything else is wrapped directly as a
    microliter ``Unit``.
    """
    is_unit_expression = isinstance(microliters, str) and ':' in microliters
    if is_unit_expression:
        return Unit(microliters).to('microliter')
    return Unit(microliters, "microliter")
def read_glh(filename):
"""
Read glitch parameters.
Parameters
----------
filename : str
Name of file to read
Returns
-------
glhParams : array
Array of median glitch parameters
glhCov : array
Covariance matrix
"""
# Extract glitch parameters
glh... | 6948e0f5571c6d5f7a62dad1fb136cec48e476ae | 9,932 |
def update_user_group(user_group_id, name, **options):
"""
Update a user group
:param user_group_id: The id of the user group to update
:type user_group_id: str
:param name: Name of the user group
:type name: str, optional
:param options: ... | 20784b935675c459b7dc258c210aedd86d7b4fb9 | 9,933 |
def longitudinal_kmeans(X, n_clusters=5, var_reg=1e-3,
fixed_clusters=True, random_state=None):
"""Longitudinal K-Means Algorithm (Genolini and Falissard, 2010)"""
n_time_steps, n_nodes, n_features = X.shape
# vectorize latent positions across time
X_vec = np.moveaxis(X, 0, -1).... | a76581a7784480fa90afa9ab9e080a09ce5662f4 | 9,934 |
import decimal
def do_payment(
checkout_data, # Dict[str, str]
parsed_checkout, # Dict[str, str]
enable_itn, # type: bool
): # type: (...) -> Dict[str, str]
"""
Common test helper: do a payment, and assert results.
This takes a checkout's data and page parse (for session info ... | f69383f779ce68ef28ced79d794479a4e3a4dff9 | 9,935 |
from sphinx_astropy import __version__ as sphinx_astropy_version # noqa
def ensure_sphinx_astropy_installed():
"""
Make sure that sphinx-astropy is available.
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available... | f20911b11beaf3483d1f2f829c63d654cb0557ef | 9,936 |
def SPEED_OF_LIGHT():
    """
    The `SPEED_OF_LIGHT` function returns the speed of light in vacuum
    (unit is ms-1) according to the IERS numerical standards (2010).
    """
    c_metres_per_second = 299792458.0
    return c_metres_per_second
def filter_by_is_awesome(resources):
    """Return only the resources whose ``is_awesome`` attribute is truthy.

    Arguments:
        resources {[type]} -- A list of resources
    """
    return list(filter(lambda resource: resource.is_awesome, resources))
def topograph_image(image, step):
"""
Takes in NxMxC numpy matrix and a step size and a delta
returns NxMxC numpy matrix with contours in each C cell
"""
step_gen = _step_range_gen(step)
new_img = np.array(image, copy=True)
"""step_gen ~ (255, 245, 235, 225,...) """
def myfunc(color):... | c3a340c422bb16de83b132506e975fecf21a335c | 9,940 |
def _etag(cur):
"""Get current history ETag during request processing."""
h_from, h_until = web.ctx.ermrest_history_snaprange
cur.execute(("SELECT _ermrest.tstzencode( GREATEST( %(h_until)s::timestamptz, (" + _RANGE_AMENDVER_SQL + ")) );") % {
'h_from': sql_literal(h_from),
'h_until': sql_li... | bd04dca4ef140003c0df867fa258beb5c60c77dd | 9,941 |
def MakeListOfPoints(charts, bot, test_name, buildername,
buildnumber, supplemental_columns):
"""Constructs a list of point dictionaries to send.
The format output by this function is the original format for sending data
to the perf dashboard.
Args:
charts: A dictionary of chart names... | fe903667b0e3a4c381dbcbc3205ba87b2d0ef26b | 9,943 |
import csv
from io import StringIO
def parse_csv(string):
"""
Rough port of wq/pandas.js to Python. Useful for validating CSV output
generated by Django REST Pandas.
"""
if not string.startswith(','):
data = []
for row in csv.DictReader(StringIO(string)):
for key, val ... | bdf32e3ff1a2d63c568200e75d5f694ef5f49ce9 | 9,944 |
def list_system_configurations():
"""
List all the system configuration parameters
Returns:
.. code-block:: python
[
{
"ParameterName": "ParameterValue"
},
...
]
Raises:
500 - ChaliceViewError... | 5989cc6f1bd79e5f7bd4889883dccb7fa9bf1bd4 | 9,945 |
def add_dbnsfp_to_vds(hail_context, vds, genome_version, root="va.dbnsfp", subset=None, verbose=True):
"""Add dbNSFP fields to the VDS"""
if genome_version == "37":
dbnsfp_schema = DBNSFP_SCHEMA_37
elif genome_version == "38":
dbnsfp_schema = DBNSFP_SCHEMA_38
else:
raise ValueEr... | f3c652c77858b9e859bd47e48002a1de3d865fa0 | 9,946 |
import torch
def get_wav2vec_preds_for_wav(
path_to_wav: str,
model,
processor,
device: torch.device,
bs: int = 8,
loading_step: float = 10,
extra_step: float = 1,
) -> str:
"""
Gets binary predictions for wav file with a wav2vec 2.0 model
Args:
path_to_wav (str): abso... | 2f9abc97559d1853631dcdf79599190714f618c8 | 9,948 |
def header_lines(filename):
    """Read the first five lines of a file and return them as a list of strings.

    The file is opened in binary mode and each line is decoded with the
    default codec, then stripped of trailing whitespace. Files shorter than
    five lines yield empty strings for the missing lines.
    """
    lines = []
    with open(filename, mode='rb') as handle:
        for _ in range(5):
            lines.append(handle.readline().decode().rstrip())
    return lines
async def load_last_cotd(chat_id: int):
"""Load the time when the user has last received his card of the day.
Args:
chat_id (int): user chat_id
"""
QUERY = "SELECT last_cotd FROM users WHERE id = %(id)s"
async with aconn.cursor() as cur:
await cur.execute(QUERY, {"id": chat_id})
... | 2e2aabc18a014e9f96fee91f6e8d85b875edcf2a | 9,953 |
from pathlib import Path
import json
import torch
def load_model(targets, model_name='umxhq', device='cpu'):
"""
target model path can be either <target>.pth, or <target>-sha256.pth
(as used on torchub)
"""
model_path = Path(model_name).expanduser()
if not model_path.exists():
raise No... | 8fdafa6ac28ed2277337dc1f3ded295668963c8a | 9,954 |
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import List
import scipy
def model_gradient_descent(
f: Callable[..., float],
x0: np.ndarray,
*,
args=(),
rate: float = 1e-1,
sample_radius: float = 1e-1,
n_sample_point... | d5bd32f21cdc871175c3f4c1601c1da240866e14 | 9,955 |
def index():
    """Render the module's index page."""
    translated_name = _('Invenio-Archivematica')
    return render_template(
        "invenio_archivematica/index.html",
        module_name=translated_name)
import re
def word_detokenize(tokens):
"""
A heuristic attempt to undo the Penn Treebank tokenization above. Pass the
--pristine-output flag if no attempt at detokenizing is desired.
"""
regexes = [
# Newlines
(re.compile(r'[ ]?\\n[ ]?'), r'\n'),
# Contractions
(re.... | 577c2ed235aaf889699efc291d2b206a922f1f4a | 9,959 |
def googlenet_paper(pretrained=False, **kwargs):
"""
GoogLeNet Model as given in the official Paper.
"""
kwargs['aux'] = True if 'aux' not in kwargs else kwargs['aux']
kwargs['replace5x5with3x3'] = False if 'replace5x5with3x3' not in kwargs \
else kwargs['replace5x5with3x3']
... | 01eaf2cf89648b334f634e83ca2d774e58970999 | 9,960 |
def is_regex(obj):
    """Cannot do type check against SRE_Pattern, so we use duck typing.

    An object counts as a compiled regex when it exposes both a ``match``
    method and a ``pattern`` attribute.
    """
    return all(hasattr(obj, attr) for attr in ('match', 'pattern'))
import functools
def GetDefaultScopeLister(compute_client, project=None):
"""Constructs default zone/region lister."""
scope_func = {
compute_scope.ScopeEnum.ZONE:
functools.partial(zones_service.List, compute_client),
compute_scope.ScopeEnum.REGION:
functools.partial(regions_servi... | 25069007b68a74b26e2767e146c25466b65e3377 | 9,964 |
def find_user(username):
    """Look up a user by username.

    Arguments:
        username -- the username to search for
    Returns:
        Whatever ``User.find_by_username`` yields for that username.
    """
    matching_user = User.find_by_username(username)
    return matching_user
from typing import List
def filter_by_author(resources: List[Resource], author: Author) -> List[Resource]:
    """Return only the resources written by the specified author.

    Arguments:
        resources {List[Resource]} -- A list of resources
        author {Author} -- The author to match against each resource
    """
    return list(filter(lambda resource: resource.author == author, resources))
import time
def cpu_bound_op(exec_time, *data):
"""
Simulation of a long-running CPU-bound operation
:param exec_time: how long this operation will take
:param data: data to "process" (sum it up)
:return: the processed result
"""
logger.info("Running cpu-bound op on {} for {} seconds".form... | a52d3e25a75f9c7b0ab680a9ad1cb0e5d40de92a | 9,967 |
def elastic_transform_approx(
img,
alpha,
sigma,
alpha_affine,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
value=None,
random_state=None,
):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications for speed).
Based on https://gis... | 9684847e756e0299be6766b5bb8220e6a1b4fc8d | 9,968 |
def jitter(t, X, amountS):
"""Return a random number (intended as a time offset, i.e. jitter) within the range +/-amountS
The jitter is different (but constant) for any given day in t (epoch secs)
and for any value X (which might be e.g. deviceID)"""
dt = ISO8601.epoch_seconds_to_datetime(t)
d... | db62c4365bf4cbf9d2ed0587c51846a274a691a4 | 9,970 |
import string
def check_DNA(DNA_sequence):
"""Check that we have a DNA sequence without junk"""
#
# Remove all spaces
DNA_sequence=string.replace(DNA_sequence,' ','')
# Upper case
DNA_sequence=string.upper(DNA_sequence)
# Check that we only have DNA bases in the seq
ok=1
garbage={}... | d73b8176938716b5c3710055750f05b24eab80a5 | 9,971 |
def read():
"""
read() : Fetches documents from Firestore collection as JSON
warehouse : Return document that matches query ID
all_warehouses : Return all documents
"""
try:
warehouse_id = request.args.get('id')
if warehouse_id:
warehouse = warehouse_ref.d... | 17d3622f9f0770edb333907298112487432c7025 | 9,972 |
def transposeC(array, axes=None):
"""
Returns the (conjugate) transpose of the input `array`.
Parameters
----------
array : array_like
Input array that needs to be transposed.
Optional
--------
axes : 1D array_like of int or None. Default: None
If *None*, reverse the di... | fecd60d72c4c38dc87d59f430365cefe72f40ef4 | 9,973 |
from typing import List
def _partition_files(files: List[str], num_partitions: int) -> List[List[str]]:
"""Split files into num_partitions partitions of close to equal size"""
id_to_file = defaultdict(list)
for f in files:
id_to_file[_sample_id_from_path(f)[0]].append(f)
sample_ids = np.array(... | e9fac329f8e1c1c7682984216c34e7b259776c82 | 9,974 |
import json
from pathlib import Path
import time
import requests
def run_vscode_command(
command: str,
*args: str,
wait_for_finish: bool = False,
expect_response: bool = False,
decode_json_arguments: bool = False,
):
"""Execute command via vscode command server."""
# NB: This is a hack to ... | 4fa3626f1371c0c03923f37136616fb7055ef9cf | 9,975 |
import threading
def run_with_timeout(proc, timeout, input=None):
"""
Run Popen process with given timeout. Kills the process if it does
not finish in time.
You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise
the output will be None.
The returncode is 999 if the proce... | 414e18dae8f31b20c472f7da14475f8da5761781 | 9,976 |
def dot(x, y, alpha=0):
"""
Compute alpha = xy + alpha, storing the incremental sum in alpha
x and y can be row and/or column vectors. If necessary, an
implicit transposition happens.
"""
assert type(x) is matrix and len(x.shape) is 2, \
"laff.dot: vector x must be a 2D num... | 2ef9fd4b02a586e9caff70b75bd598e925608171 | 9,977 |
def load_train_test_data(
train_data_path, label_binarizer, test_data_path=None,
test_size=None, data_format="list"):
"""
train_data_path: path. path to JSONL data that contains text and tags fields
label_binarizer: MultiLabelBinarizer. multilabel binarizer instance used to transform tags
... | 51aaf916f948b198e1f25c002655731008c173ed | 9,978 |
def get_token() -> str:
"""Obtains the Access Token from the Authorization Header"""
# Get the authorization header
authorization_header = request.headers.get("Authorization", None)
# Raise an error if no Authorization error is found
if not authorization_header:
payload = {
"co... | 5e1d05f705ad1c7505963e96c8637e5ab42aff79 | 9,980 |
def convert_op_str(qubit_op_str, op_coeff):
"""
Convert qubit operator into openfermion format
"""
converted_Op=[f'{qOp_str}{qNo_index}' for qNo_index, qOp_str in enumerate(qubit_op_str) if qOp_str !='I']
seperator = ' ' #space
Openfermion_qubit_op = QubitOperator(seperator.join(conver... | a6a512758a706b3a788f686331747ac9224c2f8b | 9,982 |
def __state_resolving_additional_facts(conversation, message, just_acknowledged):
"""
Bot is asking the user questions to resolve additional facts
:param conversation: The current conversation
:param message: The user's message
:param just_acknowledged: Whether or not an acknowledgement just happene... | d1e75e0d67aa2b1bcc899885c83132a47df015dc | 9,983 |
import json
def load_dataset(path):
"""Load json file and store fields separately."""
with open(path) as f:
data = json.load(f)['data']
output = {'qids': [], 'questions': [], 'answers': [],
'contexts': [], 'qid2cid': []}
for article in data:
for paragraph in article['para... | 4ba01f49d6a0aa3329b076fc0de9dd38fb99f2f8 | 9,984 |
def generate_rand_enex_by_prob_nb(shape: tp.Shape,
entry_prob: tp.MaybeArray[float],
exit_prob: tp.MaybeArray[float],
entry_wait: int,
exit_wait: int,
... | fbb7fc4bcf50139f455049edd4af62e9c0429dd3 | 9,985 |
from datetime import datetime
import html
import requests
from textwrap import dedent
def retrieve(last_updated=datetime.now()):
""" Crawls news and returns a list of tweets to publish. """
print('Retrieving {} alzheimer news since {}.'.format(SITE, last_updated))
to_ret = list()
# Get all the conte... | ba194b84a50164ca8a238a77d0e80d5e80c93ae2 | 9,988 |
import binascii
def check_seal(item):
"""
Given a message object, use the "seal" attribute - a cryptographic
signature to prove the provenance of the message - to check it is valid.
Returns a boolean indication of validity.
"""
try:
item_dict = to_dict(item)
raw_sig = item_dict... | 86bb7b22d2efe4e7117b3c65fad2a7dc4853b428 | 9,989 |
def plot_win_prob(times, diff, end_lim, probs, team_abr, bools):
""" This function plots the win probability and
score differential for the game
@param times (list): list containing actual_times
and times. times contains all of the times at
which win probability was calculated
@param di... | 139906b4a2db3cf3a7ffa531ec0701be0d395b13 | 9,990 |
def add_office():
"""Given that i am an admin i should be able to add a political office
When i visit ../api/v2/offices endpoint using POST method"""
if is_admin() is not True:
return is_admin()
errors = []
try:
if not request.get_json(): errors.append(
make_response(j... | ee990cb55ca819a1b4fdd2eed4346f7fca21a7c3 | 9,991 |
from pathlib import Path
import click
def send_message(
mg: mailgun.MailGun,
templates: t.Tuple[str, str],
contact_name: str,
contact_email: str,
sender: str,
reply_to: str,
sponsorship_package: t.Optional[Path],
dry_run: bool,
) -> bool:
"""
Send an individual email and report... | 62d03de5fa7a3c579ff2351e2c4623b3bf0e8a8e | 9,992 |
def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:
"""
TensorFlow dataset instance with multi-class sparse labels (5 classes)
:return: Multi-class sparse (labels) classification dataset
"""
# Create features
X = tf.random.normal(shape=(100, 3))
# Create one multi-... | 05bd5f809e08fde21270c286351ed32b9ed2cb97 | 9,993 |
def skipIfNAN(proteinPath):
""" Test if there is a NAN (not a number) in the lists """
overlapArrayWhole = None
overlapArrayInterface = None
overlapTApproxWhole = None
overlapTApproxInterface = None
try:
overlapArrayWhole = np.loadtxt(proteinPath+"overlapArrayWhole.txt")
except IOErr... | 0993fe55879e2c965b9856435e38f3a33d803e33 | 9,994 |
import json
def alignment_view(request, project_uid, alignment_group_uid):
"""View of a single AlignmentGroup.
"""
project = get_object_or_404(Project, owner=request.user.get_profile(),
uid=project_uid)
alignment_group = get_object_or_404(AlignmentGroup,
reference_genome__proj... | 50f9420dca7c939524e1e243d667ddd76d7687d0 | 9,995 |
def get_matching_string(matches, inputText, limit=0.99):
"""Return the matching string with all of the license IDs matched with the input license text if none matches then it returns empty string.
Arguments:
matches {dictionary} -- Contains the license IDs(which matched with the input text) with th... | be0fe152e530ec8244f892bfb4887b78bf89027b | 9,997 |
def get_review(annotation):
    """
    Return the review ``Comment`` attached to ``annotation``, or ``None``
    when no such comment exists.
    """
    try:
        return Comment.objects.get(annotation=annotation)
    except Comment.DoesNotExist:
        return None
def load_grid(grdfiles, blocks, dimpart, nsigma, **kwargs):
"""Setup a `grid` by reading `grdfiles` on `blocks`
"""
ncgrid = nct.MDataset(grdfiles, blocks, dimpart, **kwargs)
# dummy time, to be updated later
time = ma.Marray(np.arange(10), dims=tdims)
lat = nct.readmarray(ncgrid, "lat_rho", h... | 08d102c4a1ef163e2af4801d7ffe2b572b747a58 | 9,999 |
import inspect
def with_patch_inspect(f):
"""decorator for monkeypatching inspect.findsource"""
def wrapped(*args, **kwargs):
save_findsource = inspect.findsource
save_getargs = inspect.getargs
inspect.findsource = findsource
inspect.getargs = getargs
try:
... | 711fa3099b0c6242b623305237f950120b3de19a | 10,000 |
def apply_hux_f_model(r_initial, dr_vec, dp_vec, r0=30 * 695700, alpha=0.15, rh=50 * 695700, add_v_acc=True,
omega_rot=(2 * np.pi) / (25.38 * 86400)):
"""Apply 1d upwind model to the inviscid burgers equation.
r/phi grid. return and save all radial velocity slices.
:param r_initial: 1... | 31fb582cc8d31702d8ac8aabb2dd099f169b0c08 | 10,001 |
import inspect
def requires_request_arg(method):
"""
Helper function to handle deprecation of old ActionMenuItem API where get_url, is_show,
get_context and render_html all accepted both 'request' and 'parent_context' as arguments
"""
try:
# see if this is a pre-2.15 get_url method that ta... | 0ec09e34c04d4d54762051b01af8c80754d47125 | 10,002 |
def show_output_to_df(
show_output: str,
spark_session: SparkSession,
default_data_type: str = 'string'
):
"""
Takes a string containing the output of a Spark DataFrame.show() call and
"rehydrates" it into a new Spark DataFrame instance. Example input:
+--------+--------+
|co... | 0dd9372b29d191a846ac4a1e2251c118e4a01102 | 10,003 |
import math
def Schwefel(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Schwefel function."""
del seed
dim = len(arr)
bernoulli_arr = np.array([pow(-1, i + 1) for i in range(dim)])
x_opt = 4.2096874633 / 2.0 * bernoulli_arr
x_hat = 2.0 * (bernoulli_arr * arr) # Element-wise multipli... | 1588dc5fa7864c3bd7ed5639ca44dafcd5d7f405 | 10,004 |
def article_idx_to_words_row(article_idx):
"""
Given a tuple with an article and an index, return a Row with the
index ad a list of the words in the article.
The words in the article are normalized, by removing all
non-'a-z|A-Z' characters.
Any stop words (words of less than 2 characters) are ... | 8a956e6be7d0b3e3076219929b8e5e2358f856ab | 10,005 |
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def get_device_strategy(device, half=False, XLA=False, verbose=True):
"""
Returns the distributed strategy object, the tune policy anb the number of replicas.
Parameters
----------
device : str
Possible v... | c0c5d29490876812d3a3724638a17ebb0abdd54e | 10,006 |
def make_mask(
    pois_gdf,
    link_gdf,
):
    """Build a ``(mask, enum)`` pair for the given POIs and links.

    NOTE(review): this appears to be a stub — both arguments are currently
    ignored and two empty numpy arrays are returned.

    :param pois_gdf: points-of-interest data (unused)
    :param link_gdf: link data (unused)
    :return: tuple ``(mask, enum)`` of empty numpy arrays
    """
    empty_mask, empty_enum = np.array([]), np.array([])
    return empty_mask, empty_enum
def SEMIMINUS(r1, r2):
    """aka NOT MATCHING
    (macro): the rows of r1 that have no match in r2."""
    matching_rows = SEMIJOIN(r1, r2)
    return MINUS(r1, matching_rows)
def Ineg_wrapper(valS, valI):
"""
Function used to wrap Inequalities into a suitable form for optimisation
valS > valI --> Inequality is satisfied
valS and valI can be float or 1d array
"""
epsilon = 1e-6
top = 1e3
ecart = valI - valS
if ecart < epsilon:
out = np.exp(ecart)... | 1bf1f664845de8cc13750d6d021c1058687d91cc | 10,009 |
def preprocess_imgs(set_name, img_size):
"""
Resize and apply VGG-15 preprocessing
"""
set_new = []
for img in set_name:
img = cv2.resize(
img,
dsize=img_size,
interpolation=cv2.INTER_CUBIC
)
set_new.append(tf.keras.applications.vgg16.prepr... | 52f1b677a053feac585b57847aab32c8d38c5b30 | 10,010 |
def get_shapes(galsim_img, center):
""" Get shapes
This function compute the moments of an image. Then return the sigma of the
window function used (size of the object) and the amplitude
(flux of the object).
Parameters
---------
galsim_img : galsim.image.Image
Galsim.image object ... | 3d6520d129c0c6bea93f91e332b477b777041a0b | 10,011 |
def ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,
refsym):
"""
ascending super operator for a modified binary MERA
ascends 'hamAB' and 'hamBA' up one layer
Args:
hamAB (tf.Tensor): local Hamiltonian on the A-B lattice
hamBA (tf.Tenso... | 8692d2c0d02e82cb691c24977091665015aecdc6 | 10,012 |
def filteredhash(repo, maxrev):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
tiprev and tipnode stored in the cache file match the current repository.
However, this is not sufficient for validating repoviews because the ... | de606e22c499eb53d6d83f68900d234e76498e35 | 10,013 |
import logging
def filter_blast_by_amplicon(blast_hits, min_amplicon_len, max_amplicon_len):
"""
Filtering primers by putative amplicon that would be generated.
If the amplicon size is outsize of the min/max, then the primers not legit off-targets.
"""
logging.info('Filtering to only hits producin... | 8f84a5d615f65e7c21d5135d3f585b91c0f4667b | 10,014 |
def determine_channel(channel_as_text):
"""Determine which channel the review is for according to the channel
parameter as text, and whether we should be in content-review only mode."""
if channel_as_text == 'content':
# 'content' is not a real channel, just a different review mode for
# lis... | db8eeaae3c953cf497135f4d6e6071275a626dc2 | 10,016 |
import requests
def get_device_config(device_name, dnac_jwt_token):
"""
This function will get the configuration file for the device with the name {device_name}
:param device_name: device hostname
:param dnac_jwt_token: DNA C token
:return: configuration file
"""
device_id = get_device_id_... | b092efbe307f3f7a73cc998275ad67ea064cd3ed | 10,018 |
def get_ideas():
    """
    Fetch every idea document from the 'ideas' mongo collection.
    """
    all_ideas = find('ideas')
    return all_ideas
import numpy
def uppercase_dtype(dtype):
    """ Convert a dtype to upper case. A helper function.
    Do not use.

    Maps every field name of a structured dtype to its upper-cased form,
    preserving each field's (dtype, offset) description.
    """
    renamed_fields = {name.upper(): dtype.fields[name] for name in dtype.names}
    return numpy.dtype(renamed_fields)
from typing import Dict
def mlp_prior(input_dim: int, zdim: int = 2) -> Dict[str, jnp.array]:
"""Priors over weights and biases in the default Bayesian MLP"""
hdim = [64, 32]
def _bnn_prior(task_dim: int):
w1 = sample_weights("w1", input_dim, hdim[0], task_dim)
b1 = sample_biases("b1", hd... | 29c1d751f09a8da0c9f68209a5bcd48db12e1ca1 | 10,022 |
def get_biggest_spread_by_symbol(exchanges, symbol):
"""Get biggest spread by symbol."""
ask_exchange_id = ""
min_ask_price = 99999999
bid_exchange_id = ""
max_bid_price = 0
for exchange_id in exchanges:
exchange = eval("ccxt.{0}()".format(exchange_id))
try:
order_... | 20eda8274e513d1e098c34c309833c58be6dbb4e | 10,023 |
def update_user():
"""User update route
:return: action status
"""
if 'data' in request.json:
data = request.json['data']
if ('profile' in data) and ('theme' in data['profile']):
current_user.profile.theme = data['profile']['theme']
services.db.session.commit()
r... | f6b98a0e06f7b898737ffa0e6c395f2ddd18fc7b | 10,025 |
from typing import List
def on_deck(elements: List[int], all_vars):
    """Constrain all of the given elements to lie within the deck.

    For each 1-based element index, emits two rules bounding the matching
    variable to the inclusive range 1..52.
    """
    rules = []
    for element in elements:
        deck_var = all_vars[element - 1]
        rules.extend((deck_var >= 1, deck_var <= 52))
    return rules
def transform_data_to_dictionary(elements):
"""Parses each element in the list and parses it in a dictionary
Args:
elements (list): list of html elements
Returns:
dictionary: treated information.
"""
url_informations = {}
for n in range(0, len(elements), 2):
url_informa... | fd81fe7b6093577f32e460cb8a4d22cbbec92789 | 10,027 |
import math
import torch
def postprocess_new(u, x, lr_min, lr_max, num_itr, rho=0.0, with_l1=False,s=math.log(9.0)):
"""
:param u: utility matrix, u is assumed to be symmetric, in batch
:param x: RNA sequence, in batch
:param lr_min: learning rate for minimization step
:param lr_max: learning rate... | 51fb589a2a8ccaeb96b06192f6050ded91f81f07 | 10,028 |
def parse_api_error(response):
"""
Parse the error-message from the API Response.
Assumes, that a check if there is an error present was done beforehand.
:param response: Dict of the request response ([imdata][0][....])
:type response: ``dict``
:returns: Parsed Error-Text
:rtype: ``str``
... | acc4256b3245e3e2c10e3ba998bf577e0f51a33e | 10,029 |
from typing import Union
def login_manual_user_device(username: str, password: str, mac_address: str) -> Union[str, Token]:
"""Try to login by username and password. A token for auto-login is returned"""
possible_user = User.get_by_username(username)
if possible_user is None:
fail_msg = f"No user ... | 5dd6e1043ffea2cceacf1fc83e9713b4b0fd827b | 10,030 |
def corrector_new(Ybus, Ibus, Sbus, V0, pv, pq, lam0, Sxfr, Vprv, lamprv, z, step, parametrization, tol, max_it,
verbose, max_it_internal=10):
"""
Solves the corrector step of a continuation power flow using a full Newton method
with selected parametrization scheme.
solves for bus vol... | e4ff6d31916c34768152af998c1bc5ff4fdebcb7 | 10,031 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.