content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import logging
def update_softwaretitle_packages(api, jssid, pkgs):
"""
Update packages of software title
:param jssid: Patch Software Title ID
:param pkgs: dict of {version: package, ...}
:returns: None
"""
logger = logging.getLogger(__name__)
data = api.get(f"patchs... | 0acb3dfbff0e85a2e8a876d5e5d484c4d1e52068 | 5,900 |
from typing import List
def get_balances(session: Session, redis: Redis, user_ids: List[int]):
"""Gets user balances.
Returns mapping { user_id: balance }
Enqueues in Redis user balances requiring refresh.
"""
# Find user balances
query: List[UserBalance] = (
(session.query(UserBalance... | 82f6fdf0fcc8bcd241c97ab50a89ba640793b704 | 5,901 |
from typing import Optional
from typing import Tuple
def kmeans(observations: ndarray, k: Optional[int] = 5) -> Tuple[ndarray, ndarray]:
"""Partition observations into k clusters.
Parameters
----------
observations : ndarray, `shape (N, 2)` or `shape (N, 3)`
An array of observations (x, y) to... | 1a8cb2e61e8d96a45d4165edf1b148fd7c8ab5e3 | 5,902 |
def encloses(coord, points):
    """Return True if ``coord`` lies inside the polygon ``points``.

    Both the point and the polygon are scaled by the Clipper integer
    scale factor before the point-in-polygon test, since pyclipper
    operates on integer coordinates.
    """
    scale = constants.CLIPPER_SCALE
    scaled_coord = st(coord.to_list(), scale)
    scaled_polygon = st(points, scale)
    # PointInPolygon returns 0 for outside, non-zero for inside/on-edge.
    return pyclipper.PointInPolygon(scaled_coord, scaled_polygon) != 0
def arpls(y, lam, ratio=1e-6, niter=1000, progressCallback=None):
"""
Return the baseline computed by asymmetric reweighted penalized least squares smoothing, arPLS.
Ref: Baseline correction using asymmetrically reweighted penalized least squares smoothing
Sung-June Baek, Aaron Park, Young-Jin Ahn a... | d149397827d89b8708a09f4ceb7c38c989d99e17 | 5,904 |
async def test_function_raised_exception(dut):
    """
    Test that exceptions thrown by @function coroutines can be caught
    """
    # Coroutine that always raises; @cocotb.function wraps it so it can
    # be invoked from blocking (non-coroutine) code.
    @cocotb.function
    async def func():
        raise ValueError()

    # Blocking wrapper run by cocotb in an external context; calling it
    # from the test crosses the external/function boundary both ways.
    @external
    def ext():
        return func()

    # The ValueError raised inside func() should propagate back through
    # the external/function bridge to this awaiting coroutine.
    with pytest.raises(ValueError):
        await ext()
def dismiss_notification_mailbox(notification_mailbox_instance, username):
"""
Dismissed a Notification Mailbox entry
It deletes the Mailbox Entry for user
Args:
notification_mailbox_instance (NotificationMailBox): notification_mailbox_instance
username (string)
Return:
bo... | 9955361ac42c079adefcd8402fb9a1d5e3822a57 | 5,906 |
import operator
def knn(x, y, k, predict_x):
"""
knn็ฎๆณๅฎ็ฐ๏ผไฝฟ็จๆฌงๆฐ่ท็ฆป
:param x: ๆ ทๆฌๅผ
:param y: ๆ ็ญพ
:param k: ไธชๆฐ
:return:
"""
assert isinstance(y, np.ndarray)
y = y.flatten('F')
def cal_distance(a, b):
return np.sqrt(np.sum(np.power(a - b, 2), axis=0))
dists = {
... | 425095898acce2fc966d00d4ba6bc8716f1062f8 | 5,907 |
def piano():
    """Build and return a lynames.Instrument configured as a piano."""
    settings = dict(
        abbr='Pno.',
        transposition=None,
        keyboard=True,
        midi='acoustic grand',
        family='percussion',
        mutopianame='Piano',
    )
    return lynames.Instrument('Piano', **settings)
from typing import Any
from typing import Dict
def extract_fields(obj: Any) -> Dict[str, Any]:
"""A recursive function that extracts all fields in a Django model, including related fields (e.g. many-to-many)
:param obj: A Django model
:return: A dictionary containing fields and associated values
"""
... | bc6b45a82ab2a336e116ce528aaed45b2b77ef39 | 5,909 |
from re import T
def decomposeM(modified):
"""Auxiliary in provenance filtering: split an entry into name and date."""
splits = [m.rsplit(ON, 1) for m in modified]
return [(m[0], dtm(m[1].replace(BLANK, T))[1]) for m in splits] | 1f613d11d2f8c3ceec4f6c853b9412b5b7eb3e0c | 5,910 |
import logging
def update(data):
"""
TODO:
find a way to call collection.findOneAndUpdate(), currently pymodm .update()
only returns the number of updated record.
"""
try:
required_fields = ['id']
validator.validate_required_fields(required_fields, data)
cleane... | a3895574b5e811e91db2063bdabc3bd297d7a904 | 5,911 |
def get_2d_peaks_coords(
data: np.ndarray, size: int = None, threshold: float = 0.5
) -> np.ndarray:
"""Detect peaks in image data, return coordinates.
If neighborhoods size is None, default value is the highest value
between 50 pixels and the 1/40th of the smallest image dimension.
Detection thre... | 815979bd0105acc7bb3fb58db691a8963d9ca2f4 | 5,912 |
def border_positions_from_texts(texts, direction, only_attr=None):
"""
From a list of textboxes in <texts>, get the border positions for the respective direction.
For vertical direction, return the text boxes' top and bottom border positions.
For horizontal direction, return the text boxes' left and rig... | 8b0f57e21b015b6092104454195254861432b610 | 5,913 |
def progress(self):
    """Check if foo can send to corge"""
    # Sending is unconditionally permitted.
    return True
def write_sample_sdf(input_file_name, valid_list):
"""
Function for writing a temporary file with a subset of pre-selected
structures
:param input_file_name: name of input file
:param valid_list: list of indexes of pre-selected structures
:return: name of subsampled file
"""
sample_fil... | 0b22c14452f6de978e7ea811d761195d92bfe6c4 | 5,915 |
import math
def rotx(theta, unit="rad"):
"""
ROTX gives rotation about X axis
:param theta: angle for rotation matrix
:param unit: unit of input passed. 'rad' or 'deg'
:return: rotation matrix
rotx(THETA) is an SO(3) rotation matrix (3x3) representing a rotation
of THETA radians about th... | b05a6116c64837de163ad26dc36ffe1a7166635d | 5,916 |
from typing import Sequence
def _table(*rows: Sequence) -> str:
"""
>>> _table(['a', 1, 'c', 1.23])
'|a|1|c|1.23|'
>>> _table(['foo', 0, None])
'|foo|||'
>>> print(_table(['multiple', 'rows', 0], ['each', 'a', 'list']))
|multiple|rows||
|each|a|list|
"""
return '\n'.join([
... | d566da2ad9240e73b60af00d3e4b4e25607234b4 | 5,917 |
def trunc(s, n):
    """
    Truncate a string to N characters, appending '...' if truncated.
    trunc('1234567890', 10) -> '1234567890'
    trunc('12345678901', 10) -> '1234567890...'
    """
    # Falsy values (empty string, None) pass through unchanged.
    if not s:
        return s
    if len(s) > n:
        return s[:n] + '...'
    return s
import os
def sgrib_variable_crop(tmp_grib, nthreads_w, fp_out, logger):
"""
Take the small grib file from grib_to_small_grib and cut it down
to the variables we need
Args:
tmp_grib: File path to small grib2 file
nthreads_w: Number of threads for running wgrib2 commands
fp_o... | 25c55b6e8a2af23e10c11aa5e7df2caa9cd79bdd | 5,919 |
def cs_geo():
    """Geographic lat/lon coordinates in WGS84 datum.
    """
    coordsys = CSGeo()
    # 2-D horizontal datum WGS84, vertical datum mean sea level.
    coordsys.inventory.datumHoriz = "WGS84"
    coordsys.inventory.datumVert = "mean sea level"
    coordsys.inventory.spaceDim = 2
    coordsys._configure()
    coordsys.initialize()
    return coordsys
def make_range(value):
    """Return a range over 0 (inclusive) .. ``value`` (exclusive).

    Given an integer ``value``, produce the corresponding ``range`` object.
    """
    return range(value)
def run(actor, observer, content):
    """
    Shortcut to run an Onirim and return the result.
    Returns:
        True if win, False if lose, None if other exception thrown.
    """
    core = Core(actor, observer, content)
    flow = Flow(core)
    return flow.whole()
def GetPoseBoneFCurveFromArmature(armatureObj, poseBoneName, data_path, parameterIndex):
"""
In Blender the FCurves are used to define the Key Frames.
In general, for a single object, there's one FCurve for each of
the following properties.
data_path, index
'location', 0 (.x... | 450d98306adf43ea171dffa0fe6afa71ebabce57 | 5,923 |
def get_document_instance(conf=None):
"""
Helper function to get a database Document model instance based on CLA configuration.
:param conf: Same as get_database_models().
:type conf: dict
:return: A Document model instance based on configuration specified.
:rtype: cla.models.model_interfaces.D... | 054f6ff6acc38ed44a9bd2a97e0598ed34b322f8 | 5,924 |
from typing import List
def get_full_private_keys(gpg: gnupg.GPG) -> List[GPGKey]:
"""Get a list of private keys with a full private part.
GPG supports exporting only the subkeys for a given key, and in this case
a stub of the primary private key is also exported (the stub). This stub
cannot be used ... | d2bbb248613c3be9ed103212e0ca2a433de07e03 | 5,925 |
def create_blueprint():
    """Creates a Blueprint"""
    bp = Blueprint('Health Check Blueprint', __name__)
    # Register the healthcheck view at the blueprint root; route() is
    # applied as a plain decorator call so the endpoint name is derived
    # from the view function, same as the decorator syntax would do.
    bp.route('/')(healthcheck.healthcheck)
    return bp
def playable_card(card, fireworks, n_colors):
# if isinstance(card, pyhanabi.HanabiCard):
# card = {'color':colors[card.color],'rank':card.rank}
"""A card is playable if it can be placed on the fireworks pile."""
if (card.color == pyhanabi.HanabiCard.ColorType.kUnknownColor
and card().rank != pyh... | a96c6935c6b57ead9c639f13d8eccccbaf21aa4b | 5,927 |
def get_transformation_id(action):
""" Get the id of a transformation.
Parameters
----------
action: function
The transformation function
Returns
-------
int
The id of the action (-1 if not found)
"""
for index, trans in TRANSFORMATIONS.items():
if trans == ... | 2f08e7bb2b0418d39421e6b03e011d8ab4d68380 | 5,928 |
def getString(t):
    """If t is of type string, return it, otherwise raise InvalidTypeError.
    """
    # Output buffer the Prolog API fills with a pointer to the term's text.
    s = c_char_p()
    # CVT_STRING restricts conversion to string terms; REP_UTF8 selects
    # the byte encoding of the returned buffer. PL_get_chars returns
    # non-zero on success.
    if PL_get_chars(t, byref(s), REP_UTF8|CVT_STRING):
        return s.value
    else:
        raise InvalidTypeError("string")
def prep_data(filename, in_len, pred_len):
"""load data from the file and chunk it into windows of input"""
# Columns are
# 0:datetime, 1:temperature, 2:humidity, 3:pressure, 4:wind_direction, 5:wind_speed
data = np.genfromtxt(filename, delimiter=',', skip_header=1,
usecols=(1, ... | 33e1348acdcf6025159b7ed81e18358d56838d3e | 5,930 |
import subprocess
import os
import glob
def should_build_ib():
"""
Helper function that detects the system's IB support and returns if we
should build with IB support.
"""
ib_util_found = False
ib_lib_found = False
ib_header_found = False
try:
# If the command doesn't exist, w... | f5b26870f39b124690a7869e2c56997d51e6d499 | 5,931 |
def _get_security_group_id(connection, security_group_name):
"""
Takes a security group name and
returns the ID. If the name cannot be
found, the name will be attempted
as an ID. The first group found by
this name or ID will be used.)
:param connection:
:param security_group_name:
:... | 70c9b8357a9634043f07ad0019ff3cc621ba859c | 5,932 |
def viz_preprocessing(df_path):
"""
Preprocess the aggregation csv into a good format for visualization
"""
df = pd.read_csv(df_path)
res = df.T
res = res.rename(columns=res.iloc[0]).drop(res.index[0])
res = res.astype("int64")
res.reset_index(inplace=True)
res["index"] = res["index"... | fc1c39d094934aa47ac26f6e5a70f071c1df4fbd | 5,933 |
def build_encoded_broadcast_from_model(model_fn, encoder_fn):
"""Builds `StatefulBroadcastFn` for weights of model returned by `model_fn`.
This method creates a `SimpleEncoder` for every weight of model created by
`model_fn`, as returned by `encoder_fn`.
Args:
model_fn: A Python callable with no arguments... | 59b7290fe00b565467a66f72f6591f27448b9372 | 5,934 |
def adjacency(G, nodelist=None, weight="weight"):
    """
    Returns the sparse adjacency matrix
    representation of the graph.
    """
    # Default to every node of the graph when no explicit ordering given.
    selected = G.nodes() if nodelist is None else nodelist
    return nx.to_scipy_sparse_matrix(
        G, nodelist=selected, weight=weight, format="csr"
    )
from datetime import datetime
def sample_movie(user, **params):
"""Create and return a movie"""
defaults = {
'title': 'A Walk to Remember',
'duration': datetime.timedelta(hours=2, minutes=15),
'price': 8.99
}
defaults.update(params)
return Movie.objects.create(user=user, *... | d07716fbe4b043022592ae2465bb02d02f45fe41 | 5,936 |
import difflib
def lines_diff(lines1, lines2):
"""Show difference between lines."""
is_diff = False
diffs = list()
for line in difflib.ndiff(lines1, lines2):
if not is_diff and line[0] in ('+', '-'):
is_diff = True
diffs.append(line)
return is_diff, diffs | 50916d46871980fadfd854dc698481a4b0f35834 | 5,937 |
import re
def parse_ipmi_hpm(output):
"""Parse the output of the hpm info retrieved with ipmitool"""
hrdw = []
line_pattern = re.compile(r'^\|[^0-9]*([0-9]+)\|[^a-zA-Z ]* ?([^\|]*)\|([^\|]*)\|([^\|]*)\|([^\|]*)\|')
for line in output:
match = line_pattern.match(line)
if match:
... | 001731ce46fa6bbdb5103727265a0bdd353773be | 5,938 |
def get_genes_and_pathways(reactions, r_numbers, species):
"""Returns a CSV-formatted string with the list of genes and pathways where
the reaction(s) of 'species' appear.
:param reactions: list of reactions for species
:param r_numbers: RNumbers object
:param species: KEGG organism code
:retur... | 0ecddcaf50650b04125be73bcf6b304a77df011d | 5,939 |
import os
def datasetFiles(request):
"""
Return a list all dataset files in the datasets directory, by looking for files ending
with .h5 suffix. eg. ['/Users/jarnyc/BioPyramid/data/datasets/lanner.1.0.h5']
"""
# This is the dataset directory, set by the config file
datadir = request.registry.settings['biopyrami... | 0c4e2ffff720ec24b6f673f059baa023458f72e9 | 5,940 |
def relate_ca(assessment, template):
"""Generates custom attribute list and relates it to Assessment objects
Args:
assessment (model instance): Assessment model
template: Assessment Temaplte instance (may be None)
"""
if not template:
return None
ca_definitions = all_models.CustomAttri... | 31744ac40f385746e6d4e13a97ed461312280d99 | 5,941 |
def getSenderNumberMgtURL(request):
"""
๋ฐ์ ๋ฒํธ ๊ด๋ฆฌ ํ์
URL์ ๋ฐํํฉ๋๋ค.
- ๋ณด์์ ์ฑ
์ ๋ฐ๋ผ ๋ฐํ๋ URL์ 30์ด์ ์ ํจ์๊ฐ์ ๊ฐ์ต๋๋ค.
- https://docs.popbill.com/fax/python/api#GetSenderNumberMgtURL
"""
try:
# ํ๋นํ์ ์ฌ์
์๋ฒํธ
CorpNum = settings.testCorpNum
# ํ๋นํ์ ์์ด๋
UserID = settings.testUserID
... | 371ca0a813c54061c68af34719ca132081f0bfda | 5,942 |
def closest_match(match, specs, depth=0):
"""
Recursively iterates over type, group, label and overlay key,
finding the closest matching spec.
"""
new_specs = []
match_lengths = []
for i, spec in specs:
if spec[0] == match[0]:
new_specs.append((i, spec[1:]))
else:... | 3a212d880004fad843fe2d254ac96315bd1d12cf | 5,943 |
def average(w, axis=-1):
"""Calculate average
Example:
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]))
>>> average(w1)
Waveform(array([0, 1]), array([ 2. , 2.5]))
>>> w1=Waveform([range(2), range(2)],array([[1.0, 3.0], [0.0, 5.0]]), \
xlabels=['row'... | bd5510e78c995e0a9f656144393b0496e071cdf5 | 5,944 |
def random():
"""Return a random parameter set for the model."""
total_thickness = 10**np.random.uniform(2, 4.7)
Nlayers = np.random.randint(2, 200)
d_spacing = total_thickness / Nlayers
thickness = d_spacing * np.random.uniform(0, 1)
length_head = thickness * np.random.uniform(0, 1)
length_... | 958410bb8a696652b5a58cb15168719c2391179d | 5,945 |
def extract_features_to_dict(image_dir, list_file):
"""extract features and save them with dictionary"""
label, img_list = load_image_list(image_dir, list_file)
ftr = feature
integer_label = label_list_to_int(label)
feature_dict = {'features': ftr,
'label': integer_label,
... | 2fe641d7bcc24f293fae0c8badf274c9f32051d4 | 5,946 |
import torch
from typing import List
from typing import Dict
def roi_heads_forward(
self,
features, # type: Dict[str, Tensor]
proposals, # type: List[Tensor]
image_shapes, # type: List[Tuple[int, int]]
targets=None, # type: Optional[List[Dict[str, Tensor]]]
):
# type: (...) -> Tuple[List[D... | fae859b8e986694d457e9d4933071eed76a49142 | 5,947 |
def capitalize(s):
    """capitalize(s) -> string
    Return a copy of the string s with only its first character
    capitalized.
    """
    # Delegate to str.capitalize: first char upper-cased, rest lowered.
    result = s.capitalize()
    return result
def S_tunnel_e0(self, mu, sig, Efl, Efr, Tl, Tr):
"""energy flux
Conduction band edge 0 at higher of the two
"""
a = mu-sig/2
b = mu+sig/2
kTl = sc.k*Tl
kTr = sc.k*Tr
Blr = (a/kTl+1)*np.exp(-a/kTl)-(b/kTl+1)*np.exp(-b/kTl)
Brl = (a/kTr+1)*np.exp(-a/kTr)-(b/kTr+1)*np.exp(-b/kTr)
S... | 224b115d7205994e897bc74010fd4f24d562cc6c | 5,949 |
def to_camel_java(text, first_lower=True):
    """Returns the text in camelCase or CamelCase format for Java
    """
    # Java reserved words get a trailing underscore to stay valid identifiers.
    return to_camelcase(
        text,
        first_lower=first_lower,
        reserved_keywords=JAVA_KEYWORDS,
        suffix="_",
    )
def rectangle_field(N_1, N_2, B_1, B_2, H, D, r_b):
"""
Build a list of boreholes in a rectangular bore field configuration.
Parameters
----------
N_1 : int
Number of borehole in the x direction.
N_2 : int
Number of borehole in the y direction.
B_1 : float
Distance (... | 955bc7f2bf3a79d790683e7589010bc81af98f85 | 5,951 |
def convertHunit(conc, from_unit='H/10^6 Si', to_unit='ppm H2O', phase='Fo90',
printout=True):
"""
Convert hydrogen concentrations to/from H/10^6 Si and ppm H2O.
Based on Table 3 of Denis et al. 2013
"""
if phase == 'Fo90':
H_to_1_ppm = 16.35
elif phase == 'opx':
H_t... | fdd0646a09f3a2c3a8cbbc02410103caa9e023dd | 5,952 |
import re
def countBasesInFasta(fastaFile):
"""
Given a fasta file, return a dict where the number of records and
the total number of bases are given by 'records' and 'bases' respectively.
"""
recordRE = re.compile(r'^>')
whiteSpaceRE = re.compile(r'\s+')
total_bases = 0
total_seqs = 0... | 45eaa5b8d36b4bae6b97bb29fdead1efc0aed8c2 | 5,953 |
import torchvision
import torch
def load_mnist_denoising(path_raw_dataset, batch_size=1, mu=0., sigma=0.6, deterministic=True):
"""
1. Get the MNIST dataset via PyTorch built-in APIs.
2. Wrap it with customized wrapper with additive Gaussian noise processor
3. Build PyTorch data loader objects.
:... | 4dbd365a0fa6d795714aa90828fe7bb2cbc9b99f | 5,954 |
def make_triplet_freqs(sentence, triplet_freqs):
"""
ๆๅญๅใ3ใค็ตใซใใ
"""
# Janomeใงๅ่ชใซๅๅฒใใ
t = Tokenizer()
morphemes = [token.surface for token in t.tokenize(sentence)]
if len(morphemes) < 3:
return {}
# ็นฐใ่ฟใ
for i in range(len(morphemes) - 2):
triplet = tuple(morphemes[i... | 97fc3affd841e148f58de487d171df61745d17a9 | 5,955 |
def test_train_val_split(patient_id,
sub_dataset_ids,
cv_fold_number):
""" if cv_fold_number == 1:
if patient_id in sub_dataset_ids[-5:]: return 'test'
elif patient_id in sub_dataset_ids[-7:-5]: return 'validation'
else: return 'train'
... | 129f3856875033505555241408577f8885c9c393 | 5,956 |
import os
def get_convertible_info():
"""
D:\Trade\TDX\cjzq_tdx\T0002\hq_cache\speckzzdata.txt
:return:
"""
filename = '{}{}{}'.format(TDX_DIR, os.sep, 'T0002\\hq_cache\\speckzzdata.txt')
columns = [
'exchange', 'code', 'stock_code', 'convert_price', 'current_interest', 'list_amount', ... | 82d7b9485095b504f3d3a39bf446ccf7bc170558 | 5,957 |
def test_striplog_colour_plot():
"""
Tests mpl image of striplog with the ladder option.
"""
legend = Legend.builtin('NSDOE')
imgfile = "tutorial/M-MG-70_14.3_135.9.png"
striplog = Striplog.from_image(imgfile, 14.3, 135.9, legend=legend)
for iv in striplog:
iv.data['porosity'] = i... | a76f01a5b6255a0dfe39aca7cc3e352787457d17 | 5,958 |
import subprocess
import sys
def fetch_data(
o_included: str,
flowcharts: dict,
o_metadata_file: str,
o_biom_file: str,
p_redbiom_context: str,
p_bloom_sequences: str,
p_reads_filter: int,
unique: bool,
update: bool,
dim: bool) -> pd.Data... | f7cb52b2dfc7c33038f448dfd8578a6e54d1d2fa | 5,959 |
def searchArtist(artistName, session=models.session):
    """Search for artist. Returns models.ArtistSearch"""
    # Wrap the query in an ArtistSearch bound to the given session.
    search = models.ArtistSearch(artistName, session)
    return search
def _ddnone():
    """Module-level factory for defaultdict(_none), so it can be pickled.

    (Lambdas and locals are not picklable; a named module function is.)
    """
    return defaultdict(_none)
from operator import and_
def apply_join(query: Select, table: Table, join_table: Table, join: TableJoin):
"""
Performs a inner or outer join between two tables on a given query object.
TODO: enable multiple joins
:param query: A SQLAlchemy select object.
:param table: The Table we are joining f... | 1c5bfc7de3f1c7b9e17588e730085e5dc87d7c49 | 5,962 |
def filter_shapely(feature):
"""
feature1 = feature_extract(feature)
feature2 = filter_shapely(feature1)
"""
tmp = extract_Accumulation_entropy_list(feature)
tmp2=[]
for i in range(len(tmp)):
if i!=0:
tmp2.append(tmp[i]-tmp[i-1])
else:
tmp2.a... | 54654130340a3485a7de9a3d5a51d3def8a01037 | 5,963 |
def stations_by_river(stations):
"""Returns a dictionary mapping river names (key)
to a list of stations (object)"""
rivers_stations_dict = {} # Create empty dictionary
for i in range(len(stations)): # Iterate through list of stations
# Data type checks
if type(stations[i]) is ... | d57bc06b60d6669bf6a10b7ad05363124f2312b5 | 5,964 |
def getCurrentProfile():
    """
    Get the name of the current profile.
    """
    # Delegate to the JSON request helper with no extra parameters.
    return __createJSON("GetCurrentProfile", {})
def get_params_from_request(req: web.Request) -> QueryParams:
"""
This function need for convert query string to filter parameters.
"""
page = int(req.rel_url.query.get('page', '1'))
cursor = req.rel_url.query.get('cursor')
sort = req.rel_url.query.get('sort')
sort_dir = req.rel_url.query.ge... | b0deb4e5a1dc10fe82745e6c3c0869015424e2e0 | 5,966 |
def norm_mem_interval(pt):
    """Normalize membership in interval."""
    # Rewrites the proof term's proposition: binop_conv presumably applies
    # auto's normalization to both operands of the interval's binary
    # operator, and arg_conv targets the argument position of the
    # membership proposition — TODO confirm against the conv library.
    return pt.on_prop(arg_conv(binop_conv(auto.auto_conv())))
def create_figure():
    """
    Creates a simple example figure.
    """
    figure = Figure()
    axes = figure.add_subplot(111)
    # One period-and-a-half of a sine wave sampled every 0.01.
    xs = np.arange(0.0, 3.0, 0.01)
    ys = np.sin(2 * np.pi * xs)
    axes.plot(xs, ys)
    return figure
def rdf_reader(src):
    """rdf = rdf_reader(src)
    src rdf filename
    rdf The RDF mapping object"""
    # Materialize the include-expanded entries, then build the mapping.
    entries = list(rdf_include(src))
    return RDF(*entries)
def draw_signalData(Nsamp=1, alpha=__alpha, beta=__beta, **kwargs):
    """
    draw an SNR from the signal distribution
    """
    # Draw non-centrality parameters from the truncated Pareto, then
    # sample a non-central chi-square SNR for each one.
    noncentralities = __draw_truncatedPareto(Nsamp, alpha=alpha, beta=beta)
    samples = [ncx2.rvs(__noise_df, nc) for nc in noncentralities]
    return np.array(samples)
def choose_weighted_images_forced_distribution(num_images, images, nodes):
"""Returns a list of images to cache
Enforces the distribution of images to match the weighted distribution as
closely as possible. Factors in the current distribution of images cached
across nodes.
It is important to note... | 8cf49fd376893be254d5075930475de9cedee004 | 5,971 |
def predict_lumbar_ankles_model(data):
"""Generate lumbar + 2 ankles model predictions for data.
Args:
data (dict): all data matrices/lists for a single subject.
Returns:
labels (dict): columns include 'probas' (from model) and 'true'
(ground truth). One row for each fold.
... | 581a45a71bb17ebebf3a8ea63dbbfb898c6e3567 | 5,972 |
def breakOnEnter(func=None, *, debugger='pdb'):
"""
A function wrapper that causes debug mode to be entered when the
wrapped function is called.
Parameters
----------
func : The function to wrap.
debugger : The debugger used when debug mode is entered. This can
be either the debugg... | 58b1e965a563ca19c886eef6a623af84ae9ae29c | 5,973 |
def linear_search(iterable, item):
"""Returns the index of the item in the unsorted iterable.
Iterates through a collection, comparing each item to the target item, and
returns the index of the first item that is equal to the target item.
* O(n) time complexity
* O(1) space complexity
Args:
iterable:... | bdbd7e70cea79deef1375648bde61067df1d2221 | 5,974 |
def create_MD_tag(reference_seq, query_seq):
"""Create MD tag
Args:
reference_seq (str) : reference sequence of alignment
query_seq (str) : query bases of alignment
Returns:
md_tag(str) : md description of the alignment
"""
no_change = 0
md = []
for ref_base, query_ba... | 4b711521d00af132e8e29fe4fc44785b985c2607 | 5,975 |
import subprocess
import os
def get_diff(base, head=None):
"""Return a git diff between the base and head revision.
:type base: str
:type head: str | None
:rtype: list[str]
"""
if not head or head == 'HEAD':
head = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
cach... | a69fc80c03c2cf89ec47b510397bf60c8dc5732c | 5,976 |
def split_last(dataframe, target_col, sort_col='date', cut=.9):
"""Splits the dataframe on sort_column at the given cut ratio, and splits
the target column
Args:
dataframe: dataframe to be cut
sort_col: column to be sorted on. Default='date'
cut: cut ratio for the train/eval sets
... | 090144fa9c68f8ffc9e9e7c2e9c8427f0aff862d | 5,977 |
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ig... | 2fc8c4d12467ab3c0b86201271f42b7d22130b82 | 5,978 |
import csv
def readCSV(associated_ipaddr, ipaddr, timestamp):
"""
Method that extracts observations from a CSV file.
Parameters:
associated_ipaddr (str): The name of the column that specifies IP addresses of VPN clients
ipaddr (str): The name of the column that specifies IP ad... | 77594e98b83cd5d49bd8a70b28b54cab92dcadeb | 5,979 |
def interp2d(x, y, z, outshape, verbose=True, doplot=True):
"""
Parameters
----------
x, y : int
X and Y indices of `z`.
z : float
Values for given `x` and `y`.
outshape : tuple of int
Shape of 2D output array.
verbose : bool, optional
Print info to screen.... | 05558e413139a0ad71a4240e3f44c4bb9019c314 | 5,980 |
import shlex
import subprocess
def run_cmd(cmd: Text, split: bool = True, shell=False, verbose: bool = True):
"""Run a system command and print output."""
print(f'CMD: {cmd}')
cmd = shlex.split(cmd) if split else [cmd]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell)
while True... | 071df0f5b185249128b3c52a48aa1787d1299fe9 | 5,981 |
def _nms_boxes(detections, nms_threshold):
"""Apply the Non-Maximum Suppression (NMS) algorithm on the bounding
boxes with their confidence scores and return an array with the
indexes of the bounding boxes we want to keep.
# Args
detections: Nx7 numpy arrays of
[[x, y, w, h, ... | 9d3ad16396f1e94e4ac8efe1e73e8f06b529ff0f | 5,982 |
import types
def dht_get_key(data_key):
"""
Given a key (a hash of data), go fetch the data.
"""
dht_client = get_dht_client()
ret = dht_client.get(data_key)
if ret is not None:
if type(ret) == types.ListType:
ret = ret[0]
if type(ret) == types.DictType and ret.h... | 0c8680996e21b7dcc02cd4b7d81f3fa500b02076 | 5,983 |
def get_dataframe_from_table(table_name, con):
    """
    Load the given SQL table into a pandas DataFrame via the connection.
    """
    return pd.read_sql_table(table_name, con)
from typing import List
from typing import Dict
import requests
def _find_links_in_headers(*, headers, target_headers: List[str]) -> Dict[str, Dict[str, str]]:
"""Return a dictionary { rel: { url: 'url', mime_type: 'mime_type' } } containing the target headers."""
found: Dict[str, Dict[str, str]] = {}
lin... | ee23c9c7ca2633d11ea33ac2695a46eca4188af5 | 5,985 |
import re
def calc_word_frequency(my_string, my_word):
"""Calculate the number of occurrences of a given word in a given string.
Args:
my_string (str): String to search
my_word (str): The word to search for
Returns:
int: The number of occurrences of the given word in the given st... | 15ff723dd2ff089fb12cccb38283f1f75e37079d | 5,986 |
from typing import Counter
def asyn_lpa_communities(G, weight=None, seed=None):
"""Returns communities in `G` as detected by asynchronous label
propagation.
The asynchronous label propagation algorithm is described in
[1]_. The algorithm is probabilistic and the found communities may
vary on diff... | d6696f9347684dee6a81c8dc7c240a3c200ec629 | 5,987 |
def _make_warmstart_dict_env():
"""Warm-start VecNormalize by stepping through BitFlippingEnv"""
venv = DummyVecEnv([make_dict_env])
venv = VecNormalize(venv)
venv.reset()
venv.get_original_obs()
for _ in range(100):
actions = [venv.action_space.sample()]
venv.step(actions)
... | 67e0ee3e8440c24a08e306afbb9891dee64dd11d | 5,988 |
def record_attendance(lesson_id):
"""
Record attendance for a lesson.
"""
# Get the UserLessonAssociation for the current and
# the given lesson id. (So we can also display attendance etc.)
lesson = Lesson.query.filter(Lesson.lesson_id == lesson_id).first()
# Ensure the lesson id/associatio... | 237fb1df5eaf1f1b7d9555ca636971318f23c360 | 5,989 |
def ts_to_datestr(ts, fmt="%Y-%m-%d %H:%M"):
    """Format a timestamp as a human-readable date string.

    :param ts: timestamp accepted by ``ts_to_datetime`` — presumably an
        epoch timestamp; confirm against that helper.
    :param fmt: ``strftime`` format for the output.
    :return: formatted date string.
    """
    return ts_to_datetime(ts).strftime(fmt)
def _ComplexAbsGrad(op, grad):
    """Returns the gradient of ComplexAbs."""
    # TODO(b/27786104): The cast to complex could be removed once arithmetic
    # supports mixtures of complex64 and real values.
    # The incoming gradient is real; lift it to a complex value (zero
    # imaginary part) so it can multiply the complex sign of the input.
    return (math_ops.complex(grad, array_ops.zeros_like(grad)) *
            math_ops.sign(op.inputs[0]))
def so3_rotate(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotated_data = np.zero... | 84c184c920833bf2037b0f4181e9f25bcf6fd5ce | 5,992 |
import hashlib
def intmd5(source: str, nbytes=4) -> int:
"""
Generate a predictive random integer of nbytes*8 bits based on a source string.
:param source:
seed string to generate random integer.
:param nbytes:
size of the integer.
"""
hashobj = hashlib.md5(source.encode())
retu... | c03eb99a67af00a4a081423ecca3a724111514e1 | 5,993 |
def trisolve(a, b, c, y, inplace=False):
"""
The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems
of equations:
a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}
in matrix form:
Mx = y
TDMA is O(n), whereas standard Gaussian elimination is O(n^3).
Argument... | ead814b1025e8458f7e1eabeecf3eb89cb9edd5d | 5,994 |
def calc_mean_pred(df: pd.DataFrame):
"""
Make a prediction based on the average of the predictions of phones
in the same collection.
from https://www.kaggle.com/t88take/gsdc-phones-mean-prediction
"""
lerp_df = make_lerp_data(df=df)
add_lerp = pd.concat([df, lerp_df])
# each time step =... | a4f6cdb0d5efb72cd6b503a8eb3a0f4b13cee0bf | 5,995 |
def get_meals(v2_response, venue_id):
"""
Extract meals into old format from a DiningV2 JSON response
"""
result_data = v2_response["result_data"]
meals = []
day_parts = result_data["days"][0]["cafes"][venue_id]["dayparts"][0]
for meal in day_parts:
stations = []
for station... | 9d27d225a39248690529167f7ff18777a086bcc6 | 5,996 |
async def async_setup(hass, config_entry):
    """ Disallow configuration via YAML """
    # Setup always succeeds; this integration is config-entry only.
    return True
def blend(im1, im2, mask):
"""
Blends and shows the given images according to mask
:param im1: first image
:param im2: second image
:param mask: binary mask
:return: result blend
"""
res = []
for i in range(3):
res.append(pyramid_blending(im1[:, :, i], im2[:, :, i], mask, 7, ... | 4b4a635d1f44ced411b9dfe2037b0f42805f38b2 | 5,998 |
def parse(fileName):
"""
Pull the EXIf info from a photo and sanitize it so for sending as JSON
by converting values to strings.
"""
f = open(fileName, 'rb')
exif = exifread.process_file(f, details=False)
parsed = {}
for key, value in exif.iteritems():
parsed[key] = str(value)
... | 3f5aca5b38dd7f3b3a9defae1fc5f645e255a191 | 5,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.