content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def all_target_and_background_names(experiment_proto):
    """Determine names of all molecules for which to calculate affinities.
    Args:
      experiment_proto: selection_pb2.Experiment describing the experiment.
    Returns:
      List of strings giving names of target molecules followed by background
      molecules (backgrounds that are also targets are listed only once).
    """
    targets = set()
    backgrounds = set()
    for round_proto in experiment_proto.rounds.values():
        # A concentration of exactly zero is falsy, so such molecules are skipped.
        targets.update(
            name for name, conc in round_proto.target_concentrations.items() if conc)
        backgrounds.update(
            name for name, conc in round_proto.background_concentrations.items() if conc)
    background_only = [name for name in sorted(backgrounds) if name not in targets]
    return sorted(targets) + background_only
import random
def generate_name(start, markov_chain, max_words=2):
    """Generate a new town name, given a start syllable and a Markov chain.
    This function takes a single start syllable or a list of start syllables,
    one of which is then chosen randomly, and a corresponding Markov chain to
    generate a new fictional town name. The number of words in the name can
    optionally be passed in as an argument and defaults to 2 otherwise.
    Note that it is possible that the generated name already exists. To avoid
    that, one should check whether the name exists against the set of input
    names.
    """
    # NOTE(review): markov_chain is assumed to map a syllable to a list of
    # possible successors, where the integer 0 acts as an end-of-name
    # sentinel — confirm against the chain-building code.
    while True:
        if isinstance(start, list):
            # If start is a list choose a syllable randomly
            next_syllable = random.choice(start)
        else:
            next_syllable = start
        # Initialise new name
        new_name = next_syllable
        while True:
            # Choose next syllable from the Markov chain
            next_syllable = random.choice(markov_chain[next_syllable])
            # Return if end of word has been reached (0 is the end sentinel)
            if next_syllable == 0:
                break
            else:
                new_name += next_syllable
        # Remove leading and trailing spaces
        new_name = new_name.strip()
        # Make sure name has less words than max_words, otherwise start over
        if len(new_name.split(" ")) <= max_words:
            break
    # Capitalise every word in the new name
    new_name = " ".join([word.capitalize() for word in new_name.split(" ")])
    return new_name
def process_wildcard(composition):
    """
    Processes element with a wildcard ``?`` weight fraction and returns
    composition balanced to 1.0.
    """
    balanced = composition.copy()
    wildcards = [z for z, wf in composition.items() if wf == "?"]
    if not wildcards:
        return balanced
    # Split the remaining mass fraction evenly across all wildcard elements.
    known_total = sum(wf for wf in composition.values() if wf != "?")
    share = (1.0 - known_total) / len(wildcards)
    for z in wildcards:
        balanced[z] = share
    return balanced
def sunion_empty(ls):
    """ return empty set if the list of sets (ls) is empty"""
    try:
        # The unbound set.union requires at least one set argument;
        # an empty *ls raises TypeError, which we map to the empty set.
        result = set.union(*ls)
    except TypeError:
        result = set()
    return result
import re
def first_item_grabber(the_str: str, re_separator_ptn=r";|\-|–|,|\|", def_return=None):
    """
    From a string containing more than one item separated by separators, grab the first.

    The default separator pattern is now a raw string: the previous non-raw
    literal contained the invalid escape ``"\\-"`` which raises a
    DeprecationWarning (and eventually a SyntaxError) on modern Python.

    Args:
        the_str: Input string to split.
        re_separator_ptn: Regex of alternative separators.
        def_return: Value returned if the split produces nothing.

    >>> first_item_grabber("1987, A1899")
    '1987'
    >>> first_item_grabber("1987;A1899")
    '1987'
    >>> first_item_grabber("1916–1917[1915–1917]")
    '1916'
    """
    parts = re.split(re_separator_ptn, the_str)
    # re.split always returns at least one element, so the fallback only
    # triggers if a caller-supplied pattern somehow yields an empty list.
    return parts[0] if parts else def_return
def exact_change_dynamic(amount, coins):
    """
    counts[x] counts the number of ways an amount of x can be made in exact change out of a subset of coins
    given in the list of denominations 'coins'.
    Initially there are no possibilities, if no coins are allowed
    >>> exact_change_dynamic(20,[50,20,10,5,2,1])
    [1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 19, 22, 25, 28, 31, 34, 41]
    >>> exact_change_dynamic(100,[100,50,20,10,5,2,1])[-10:]
    [3229, 3376, 3484, 3631, 3778, 3925, 4072, 4219, 4366, 4563]
    """
    # Base case: one way to make zero change (use no coins at all).
    ways = [1] + [0] * amount
    # Admit denominations one at a time; each coin of 'denom' adds
    # ways[total - denom] new possibilities for every reachable total.
    for denom in coins:
        for total in range(denom, amount + 1):
            ways[total] += ways[total - denom]
    return ways
def rubicon_and_project_client_with_experiments(rubicon_and_project_client):
    """Setup an instance of rubicon configured to log to memory
    with a default project with experiments and clean it up afterwards.
    Expose both the rubicon instance and the project.
    """
    rubicon, project = rubicon_and_project_client
    # Log ten experiments; every three consecutive ones share a commit hash.
    for index in range(10):
        experiment = project.log_experiment(
            tags=["testing"],
            commit_hash=str(int(index / 3)),
            training_metadata=("training", "metadata"),
        )
        experiment.log_parameter("n_estimators", index + 1)
        experiment.log_feature("age")
        experiment.log_metric("accuracy", 80 + index)
    return (rubicon, project)
def consoO(R, s, tau, w):
    """Compute the consumption of the old agents
    Args:
        R (float): gross return on saving
        s (float): savings
        tau (float): percentage of contribution of the wage of the young agent
        w (float): wage
    Returns:
        (float): consumption of the old agents
    """
    savings_income = R * s
    pension_transfer = tau * w
    return savings_income + pension_transfer
def to_upper_camelcase(lower_underscore: str):
    """Convert underscore naming to upper camelcase.
    Example:
        rock_type --> RockType
    """
    return "".join(part.capitalize() for part in lower_underscore.split("_"))
def normalize(tensor, mean, std):
    """Normalize a ``torch.tensor`` in place, channel by channel.
    Args:
        tensor (torch.tensor): tensor to be normalized.
        mean: (list): the mean of BGR
        std: (list): the std of BGR
    Returns:
        Tensor: Normalized tensor (same object, mutated in place).
    """
    for channel, channel_mean, channel_std in zip(tensor, mean, std):
        channel.sub_(channel_mean)
        channel.div_(channel_std)
    return tensor
def _read_band_number(file_path):
"""
:type file_path: Path
:return:
>>> _read_band_number(Path('reflectance_brdf_2.tif'))
'2'
>>> _read_band_number(Path('reflectance_terrain_7.tif'))
'7'
>>> p = Path('/tmp/something/LS8_OLITIRS_NBAR_P54_GALPGS01-002_112_079_20140126_B4.tif')
>>> _read_band_number(p)
'4'
"""
number = file_path.stem.split('_')[-1].lower()
if number.startswith('b'):
return number[1:]
return number | e02594f32d87260231951df94bbe8e3d704ddc6b | 691,794 |
def min_max(x, axis=None):
    """
    return min_max standalization
    x = (x-x.min)/(x.max-x.min)
    min=0 max=1
    Parameters
    -------------------
    x : numpy.ndarray(x,y)
    axis :int 0 #caliculate each col
              1 # each row
    Returns
    --------------------
    result : np.ndarray(x,y)
    """
    lo = x.min(axis=axis, keepdims=True)
    hi = x.max(axis=axis, keepdims=True)
    return (x - lo) / (hi - lo)
def calc_fitness(fit_form, sum_energy, coef_energy, sum_rmsd, coef_rmsd):
    """Calculate the fitness of a pmem.
    Parameters
    ----------
    fit_form : int
        Represents the fitness formula to use.
        The only value currently available is 0,
        where fitness = CE*SE + Crmsd*Srmsd.
    sum_energy : float
        The summation of all of the individual
        energy calculations for each of the geometries.
    coef_energy : float
        The energy coefficient in the fitness formula.
    sum_rmsd : float
        The summation of all rmsd when comparing
        pairs of geometries.
    coef_rmsd : float
        The rmsd coefficient in the fitness formula.
    Raises
    ------
    ValueError
        The fit_form value is not available.
    Returns
    -------
    fitness : float
    """
    if fit_form != 0:
        raise ValueError("Unsupported fitness formula.")
    return sum_energy * coef_energy + sum_rmsd * coef_rmsd
def encode_sentences(sentences, lexicon_dictionary):
    """
    Change words in sentences into their one-hot index.
    :param sentences: A list of sentences where all words are in lexicon_dictionary
    :param lexicon_dictionary: A dictionary including all the words in the dataset
                               sentences are being drawn from.
    :return: sentences with each word replaced by a number.
    """
    # Whitespace-tokenize and map each token through the lexicon.
    return [lexicon_dictionary[word] for word in sentences.split()]
def trim_method_name(full_name):
    """
    Extract method/function name from its full name,
    e.g., RpcResponseResolver.resolveResponseObject -> resolveResponseObject
    Args:
        full_name (str): Full name
    Returns:
        str: Method/Function name
    """
    # rpartition yields ('', '', full_name) when no dot is present,
    # so the last element is always the wanted name.
    return full_name.rpartition('.')[2]
def prepare_df_annoVar(df):
    """Prepare internal dataframe as input to ANNOVAR.
    Generates a list of all the column names, adding a repeat of position to give start and end, as required by
    ANNOVAR input format, then reorders the columns to ensure the first 5 are those required by ANNOVAR
    (chromosome, start position, end position, reference allele, observed allele.)
    See annovar.openbioinformatics.org/en/latest/user-guide/input/ for more details.
    """
    # ANNOVAR needs both a start and end position, so duplicate 'position'.
    df['position2'] = df['position']
    required = ['chromosome', 'position', 'position2', 'allele1', 'allele2']
    present = df.columns
    # Required columns first (in ANNOVAR order), everything else after.
    ordering = [c for c in required if c in present] + [c for c in present if c not in required]
    return df[ordering]
def check_length(line, min=0, max=0):
    """Does a length check on the line
    Params:
        line (unicode)
        min (int)
        max (int)
    Returns
        true if length is ok
    """
    # A bound of 0 disables that side of the check.
    # Note: min is inclusive, max is exclusive.
    ok = True
    if min:
        ok = len(line) >= min
    if max and ok:
        ok = len(line) < max
    return ok
from typing import Union
import pathlib
def supplement_file_name(file: Union[str, pathlib.Path], sup: str) -> pathlib.Path:
    """
    Adds a string between the file name in a path and the suffix.
    **Parameters**
    - `file` : str
        File name
    - `sup` : str
        String to be added
    **Returns**
    - `out`: pathlib.Path
        "Supplemented" file
    """
    path = pathlib.Path(file)
    # Rebuild the name as "<stem>_<sup><suffix>" so the extension is kept.
    supplemented = f'{path.stem}_{sup}{path.suffix}'
    return path.with_name(supplemented)
def split_xyz(xyz_file: bytes) -> list[bytes]:
    """Split an xyz file into individual conformers.

    Each deck starts with an atom-count line, followed by a comment line and
    that many geometry lines. The previous implementation consumed the input
    with repeated ``list.pop(0)``, which is O(n) per call and O(n**2) overall;
    this version walks the line list with an index instead.

    Args:
        xyz_file: Raw bytes of a (possibly multi-conformer) xyz file.
    Returns:
        A list with the bytes of each individual conformer deck.
    """
    lines = xyz_file.splitlines()
    structures = []
    pos = 0
    while pos < len(lines):
        natoms = lines[pos]
        n = int(natoms.decode())
        # Deck = count line + comment line + n geometry lines.
        deck = b"\n".join(lines[pos:pos + 2 + n])
        structures.append(deck)
        pos += 2 + n
    return structures
def left_justify(words, width):
    """Given an iterable of words, return a string consisting of the words
    left-justified in a line of the given width.
    >>> left_justify(["hello", "world"], 16)
    'hello world     '
    """
    line = ' '.join(words)
    return line.ljust(width)
from typing import Any
def is_property(obj: Any) -> bool:
    """Check the given `obj` is defined with `@property`.
    Parameters:
    - `obj`: The python object to check.
    Returns:
    - `True` if defined with `@property`, otherwise `False`.
    """
    result = isinstance(obj, property)
    return result
def _is_in_bounds(x: int, y: int, width: int, height: int) -> bool:
"""
Returns whether or not a certain index is within bounds.
Args:
x (int): x pos.
y (int): y pos.
width (int): max x.
height (int): max y.
"""
if x < 0:
return False
if y < 0:
return False
if x >= width:
return False
if y >= height:
return False
return True | 8fc76261972588599b183364b3b8c350389d33c0 | 691,819 |
def data_to_html(title, data):
    """Turns a list of lists into an HTML table"""
    # HTML Headers: fixed document head with inline CSS for the table.
    html_content = """
<html>
<head>
<style>
table {
  width: 25%;
  font-family: arial, sans-serif;
  border-collapse: collapse;
}
tr:nth-child(odd) {
  background-color: #dddddd;
}
td, th {
  border: 1px solid #dddddd;
  text-align: left;
  padding: 8px;
}
</style>
</head>
<body>
"""
    # Add the header part with the given title
    html_content += "<h2>{}</h2><table>".format(title)
    # Add each row in data as a row in the table
    # The first line is special and gets treated separately:
    # it becomes the header row (<th> cells) rather than data cells (<td>).
    for i, row in enumerate(data):
        html_content += "<tr>"
        for column in row:
            if i == 0:
                html_content += "<th>{}</th>".format(column)
            else:
                html_content += "<td>{}</td>".format(column)
        html_content += "</tr>"
    # NOTE(review): the closing fragment emits a stray extra </tr>; kept as-is
    # since browsers tolerate it and callers may compare output verbatim.
    html_content += """</tr></table></body></html>"""
    return html_content
from pathlib import Path
def get_file_extension(path):
    """Gets the dot-prefixed extension from the path to a file.
    :param path: Path to the file to get the extension from.
    :type path: str
    :return: The file's extension.
    :rtype: str
    Examples
    --------
    >>> get_file_extension('/home/user/file.txt')
    '.txt'
    """
    file_path = Path(path)
    return file_path.suffix
def is_after(t1, t2):
    """True if t1 is strictly after t2.

    t1, t2: Time objects with hour, minute and second attributes.

    Bug fix: the previous implementation required *every* field of t1 to be
    greater than the corresponding field of t2 (hour AND minute AND second),
    which wrongly reported e.g. 13:00:00 as not after 12:59:59. Lexicographic
    tuple comparison gives the correct chronological ordering.
    """
    return ((t1.hour, t1.minute, t1.second) >
            (t2.hour, t2.minute, t2.second))
def reversed_arguments(func):
    """
    Return a function with reversed argument order.
    """
    def wrapped(*args):
        flipped = args[::-1]
        return func(*flipped)
    return wrapped
def get_priority_value_map(all_priorities):
    """
    Maps an index of increasing size to each priority ranging from low -> high
    e.g. given ['LOW', 'MEDIUM', 'HIGH'] will return {'LOW': 0, 'MEDIUM': 1, 'HIGH': 2}
    """
    return {text.upper(): index for index, text in enumerate(all_priorities)}
import torch
def val_epoch(model, val_loader, criterion, device):
    """Validate the model for 1 epoch
    Args:
        model: nn.Module
        val_loader: val DataLoader
        criterion: callable loss function
        device: torch.device
    Returns
    -------
    Tuple[Float, Float]
        average val loss and average val accuracy for current epoch
    """
    val_losses = []
    val_corrects = []
    # Switch to eval mode so dropout/batch-norm behave deterministically.
    model.eval()
    # Iterate over data without tracking gradients (saves memory; no backprop).
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            # prediction
            outputs = model(inputs)
            # calculate loss; predicted class is the argmax over logits
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)
            # statistics: per-batch loss and per-batch correct count
            val_losses.append(loss.item())
            val_corrects.append(torch.sum(preds == labels.data).item())
    # Mean of per-batch losses; accuracy = total correct / dataset size.
    # NOTE(review): the loss average weights every batch equally, so a smaller
    # final batch is slightly over-weighted — confirm this is intended.
    return sum(val_losses)/len(val_losses), sum(val_corrects)/len(val_loader.dataset)
def compute_error(b, m, coordinates):
    """Mean squared error of the line y = m*x + b over a set of points.

    m is the coefficient and b is the constant for prediction.
    The goal is to find a combination of m and b where the error is as
    small as possible; coordinates are the (x, y) locations.

    Args:
        b: intercept of the candidate line.
        m: slope of the candidate line.
        coordinates: sequence of indexable points; point[0] is x, point[1] is y.
    Returns:
        float: mean of the squared residuals.
    Raises:
        ZeroDivisionError: if ``coordinates`` is empty (unchanged behavior).
    """
    total_error = 0.0
    # Iterate the points directly instead of indexing via range(len(...)).
    for point in coordinates:
        x, y = point[0], point[1]
        residual = y - (m * x + b)
        total_error += residual ** 2
    return total_error / float(len(coordinates))
import torch
def _bilinear_interpolation_vectorized(
    image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
    """
    Bi linearly interpolate the image using the uv positions in the flow-field
    grid (following the naming conventions for torch.nn.functional.grid_sample).
    This implementation uses the same steps as in the SoftRas cuda kernel
    to make it easy to compare. This vectorized version requires less memory than
    _bilinear_interpolation_grid_sample but is slightly slower.
    If speed is an issue and the number of faces in the mesh and texture image sizes
    are small, consider using _bilinear_interpolation_grid_sample instead.
    Args:
        image: FloatTensor of shape (H, W, D) a single image/input tensor with D
            channels.
        grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
            points at which to sample a value in the image. The grid values must
            be in the range [0, 1]. u is the x direction and v is the y direction.
    Returns:
        out: FloatTensor of shape (N, H, W, D) giving the interpolated
        D dimensional value from image at each of the pixel locations in grid.
    """
    H, W, _ = image.shape
    # Convert [0, 1] to the range [0, W-1] and [0, H-1]
    grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
    # Fractional part of each coordinate -> weight toward the upper neighbor.
    weight_1 = grid - grid.int()
    weight_0 = 1.0 - weight_1
    grid_x, grid_y = grid.unbind(-1)
    # Integer pixel indices of the four surrounding corners.
    y0 = grid_y.to(torch.int64)
    y1 = (grid_y + 1).to(torch.int64)
    x0 = grid_x.to(torch.int64)
    x1 = x0 + 1
    # NOTE(review): y1/x1 can index out of bounds when a grid value is exactly
    # 1.0 — presumably callers guarantee strictly sub-1 coordinates; confirm.
    weight_x0, weight_y0 = weight_0.unbind(-1)
    weight_x1, weight_y1 = weight_1.unbind(-1)
    # Bi-linear interpolation
    # griditions = [[y, x], [(y+1), x]
    #               [y, (x+1)], [(y+1), (x+1)]]
    # weights = [[wx0*wy0, wx0*wy1],
    #            [wx1*wy0, wx1*wy1]]
    out = (
        image[y0, x0] * (weight_x0 * weight_y0)[..., None]
        + image[y1, x0] * (weight_x0 * weight_y1)[..., None]
        + image[y0, x1] * (weight_x1 * weight_y0)[..., None]
        + image[y1, x1] * (weight_x1 * weight_y1)[..., None]
    )
    return out
def get_ds_data(ds, target_attribute='targets'):
    """
    Returns X and y data from pymvpa dataset
    """
    samples = ds.samples
    labels = ds.sa[target_attribute].value
    return samples, labels
def getFrameLevelDisplacements(nodeFound, start, finish):
    """
    Iterates through the entire time-series data for a given
    body part to extract the X,Y,Z coordinate data.
    Args:
        nodeFound (object): joint object for the targeted body part
        start (int): starting frame number
        finish (int): ending frame number (exclusive)
    Returns:
        list: list of lists of X,Y,Z coordinates. The number of lists must
        equal to number of frames in the BVH
    """
    # The translation lives in the last column of each 4x4 transform matrix.
    return [
        [nodeFound.trtr[frame][0][3],
         nodeFound.trtr[frame][1][3],
         nodeFound.trtr[frame][2][3]]
        for frame in range(start, finish)
    ]
from datetime import datetime
def get_datetime_object(datetime_string):
    """
    Interpret the UltraSuite prompt date and time string as a python datetime object
    :param datetime_string: string like '31/12/2020 23:59:59' (day first)
    :return: datetime
    """
    prompt_format = '%d/%m/%Y %H:%M:%S'
    return datetime.strptime(datetime_string, prompt_format)
from typing import Sequence
def flatten(nested: Sequence) -> list:
    """
    Return vectorized (1D) list from nested Sequence ``nested``.
    Args:
        nested: Sequence
            Iterable sequence containing multiple other nested sequences.
    Returns: list
        Vectorized (unidimensional) version of ``nested``.
    """
    flat = []
    for element in nested:
        # Strings are sequences too, but are treated as atomic values.
        if isinstance(element, Sequence) and not isinstance(element, str):
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
import math
def calc_conv_dBZlim(coef):
    """ Calculate dBZ limit for convective/frontal case Z-R calculation.
    Limit with default values is 23.48 dBZ.
    Keyword arguments:
    coef -- dictionary containing Z(R) A and B coefficients zr_a, zr_b, zr_a_c and zr_b_c (c for convective rain)
    Return:
    conv_dbzlim -- Limit dBZ value for convective rain rate calculation
    """
    zr_a, zr_b = coef['zr_a'], coef['zr_b']
    zr_a_c, zr_b_c = coef['zr_a_c'], coef['zr_b_c']
    if zr_a == zr_a_c:
        return 10.0 * math.log10(zr_a)
    # Rain rate at which the two Z(R) relations intersect.
    rate = (zr_a / zr_a_c) ** (1.0 / (zr_b_c - zr_b))
    return 10.0 * math.log10(zr_a * (rate ** zr_b))
from typing import Dict
from typing import Any
from typing import Optional
import requests
def post_request(url : str, data : Dict[str, Any], session : Optional[requests.Session] = None) -> requests.Response:
    """
    Post a request to the url with the given data,
    optionally using a provided session.
    Parameters
    ----------
    url: str
        The url to post to.
    data: dict[str, Any]
        The json data to include in the post request.
    session: requests.Session, optional
        The persistent session to use, if None is provided
        a new one will be created and destroyed for the
        individual call.
    """
    headers = {
        'Content-Type': 'application/json'
    }
    # Reuse the caller's session when given; otherwise fall back to the
    # module-level requests.post (one-shot connection).
    poster = requests.post if session is None else session.post
    return poster(url, headers=headers, data=data)
def gauss_kl_white_diag(q_mu, q_sqrt):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, I)
    We assume multiple independent distributions, given by the columns of
    q_mu and q_sqrt
    q_mu is a matrix, each column contains a mean
    q_sqrt is a matrix, each column represents the diagonal of a square-root
    matrix of the covariance.
    """
    mahalanobis = 0.5 * (q_mu ** 2).sum()       # Mahalanobis term
    constant = -0.5 * q_sqrt.numel()            # dimensionality constant
    log_det = -q_sqrt.abs().log().sum()         # Log-det of q-cov
    trace = 0.5 * (q_sqrt ** 2).sum()           # Trace term
    return mahalanobis + constant + log_det + trace
def get_venue_response(meetup_id: int = 1, content: bool = False) -> dict:
    """
    create a Venue response
    Keyword arguments:
    meetup_id -- meetup id
    content -- if True -> add optional fields
    return -> venu dict
    """
    venue: dict = {"id": meetup_id}
    if not content:
        return venue
    venue.update({
        "address_1": "Berlinerstr. 1",
        "address_2": "oben",
        "address_3": "unten",
        "city": "Berlin",
        "country": "Germany",
        "lat": 52.520008,
        "lon": 13.404954,
        "localized_country_name": "Deutschland",
        "name": "Meetup Place",
        "phone": "030 123 456 789",
        "zip_code": "10101",
    })
    return venue
def solution2array(solution):
    """Rewrite a solution of the form {(1,1): [4], (1,2): [5], ..., (9,9): [1]}
    to a 2-dimensional 9x9 array.

    Useful for human-readable output and as an intermediate step for rewriting
    the sudoku back to the original format. Cells whose domain still has more
    than one candidate are left as 0.

    Bug fix: the previous implementation called ``solution.iteritems()``,
    which only exists on Python 2 dicts and raises AttributeError on
    Python 3; it now uses ``items()``.
    """
    # 9x9 grid of zeros; a comprehension avoids aliased rows.
    sudoku_array = [[0] * 9 for _ in range(9)]
    for (row, col), assignment in solution.items():
        # Only fully-determined cells (singleton domains) are written out.
        if len(assignment) == 1:
            sudoku_array[row - 1][col - 1] = assignment[0]
    return sudoku_array
def dmp_copy(f, u):
    """Create a new copy of a polynomial `f` in `K[X]`. """
    # u is the number of remaining variable levels; at level 0 the
    # coefficients are ground elements and a shallow copy suffices.
    if u:
        return [dmp_copy(coeff, u - 1) for coeff in f]
    return list(f)
def locToLatLong(location):
    """
    :param location: location in string format, e.g. "long:<x>,lat:<y>)"
    :return: latitude and longtitude in float format
    """
    long_part, lat_part = str(location).split(',')
    longitude = float(long_part.split(':')[-1])
    # The latitude token carries a trailing delimiter character that is
    # stripped before conversion.
    latitude = float(lat_part.split(':')[-1][:-1])
    return latitude, longitude
def get_ax_size(fig, ax):
    """Get the size of an axis in pixels.
    Args:
    - fig: figure handle
    - ax: the axis handle
    Returns:
    - tuple: width, height
    Scraped from stackoverflow, noref.
    """
    # Window extent is in display units; transform to inches, then scale by dpi.
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width = bbox.width * fig.dpi
    height = bbox.height * fig.dpi
    return width, height
def render_core(url_prefix, auth_header, resources):
    """Generate res.core.js"""
    # Accumulate the JS source as fragments and join once at the end.
    fragments = ["function(root, init) {\n"]
    fragments.append(" var q = init('%(auth_header)s', '%(url_prefix)s');\n" %
                     {'url_prefix': url_prefix, 'auth_header': auth_header})
    fragments.append(" var r = null;\n")
    for key in resources:
        fragments.append(" r = root.%(key)s = {};\n" % {'key': key})
        for action, item in resources[key].items():
            fragments.append(" r.%(action)s = q('%(url)s', '%(method)s');\n" %
                             {'action': action,
                              'url': item['url'],
                              'method': item['method']})
    fragments.append("}")
    return ''.join(fragments)
from typing import Counter
import math
import itertools
def control_smiles_duplication(random_smiles, duplicate_control=lambda x: 1):
    """
    Returns augmented SMILES with the number of duplicates controlled by the function duplicate_control.
    Parameters
    ----------
    random_smiles : list
        A list of random SMILES, can be obtained by `smiles_to_random()`.
    duplicate_control : func, Optional, default: 1
        The number of times a SMILES will be duplicated, as function of the number of times
        it was included in `random_smiles`.
        This number is rounded up to the nearest integer.
    Returns
    -------
    list
        A list of random SMILES with duplicates.
    Notes
    -----
    When `duplicate_control=lambda x: 1`, then the returned list contains only unique SMILES.
    """
    occurrence = Counter(random_smiles)
    duplicated = []
    # Counter preserves first-occurrence order, so output order is stable.
    for smiles, count in occurrence.items():
        repeats = math.ceil(duplicate_control(count))
        duplicated.extend([smiles] * repeats)
    return duplicated
def valid_alternative_image_text(arch):
    """An `img` tag must have an alt value."""
    # Any <img> without alt / t-att-alt / t-attf-alt triggers a warning.
    missing_alt = arch.xpath('//img[not(@alt or @t-att-alt or @t-attf-alt)]')
    if missing_alt:
        return "Warning"
    return True
def get_check_result(result, numvals):
    """Check the result of a 'get' operation"""
    if not isinstance(result, dict):
        return "pwrcmd output is not a dict"
    # Some errors return only the error itself
    if (result['PWR_ReturnCode'] != 0 and
            "attr_vals" not in result and
            "timestamps" not in result):
        return None
    # attr_vals and timestamps must each be a list with numvals elements
    for key in ("attr_vals", "timestamps"):
        if key not in result:
            return "'{}' not found".format(key)
        if not isinstance(result[key], list):
            return "'{}' is not a list".format(key)
        if len(result[key]) != numvals:
            return "expected {} {}".format(numvals, key)
    # status must exist
    if "status" not in result:
        return "'status' not found"
    return None
from typing import Tuple
def ntp2parts(ntp: int) -> Tuple[int, int]:
    """Split NTP time into seconds and fraction."""
    seconds = ntp >> 32          # high 32 bits
    fraction = ntp & 0xFFFFFFFF  # low 32 bits
    return seconds, fraction
def linear(input, weight, bias):
    """
    Applies a linear transformation of `X @ W.t()+b`
    Inputs:
    - input: mini-batch input X with dim (N,D1)
    - weight: weights matrix with dim (D2, D1)
    - bias : Bias with dim (D2,) or None
    Output:
    - output: transformed tensor with dim (N,D2)
    """
    output = input.mm(weight.t())
    if bias is None:
        return output
    output += bias
    return output
import socket
def _get_ip(remote):
    """Get the local IP of a connection to the to a remote host."""
    # UDP connect performs no handshake; it only selects the local interface
    # the OS would route through, which is all we need here.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect((remote, 1883))
        address = sock.getsockname()[0]
    finally:
        sock.close()
    octets = address.split(".")
    return [int(part) for part in octets]
from typing import List
def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
    Args:
        arr (:obj:`List[int]`): non-empty sorted list of integers
        x (:obj:`int`): query
    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]
    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    # Invariant while searching: arr[i] <= x < arr[j].
    while i < j and arr[i] <= x < arr[j]:
        # Probe position estimated by linear interpolation between arr[i]
        # and arr[j] — O(log log n) on uniformly distributed values.
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            # Answer lies strictly right of k.
            i, j = k + 1, j
        else:
            # Answer lies left of k.
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
import sqlite3
def db_connection(database):
    """
    Connect to an SQL database
    """
    connection = sqlite3.connect(database)
    return connection
def intersect_lists(l1, l2):
    """Returns the intersection of two lists. The result will not contain
    duplicate elements and list order is not preserved.

    Note: ``set.intersection`` accepts any iterable, so materializing a
    second set from ``l2`` was unnecessary work and memory.
    """
    return list(set(l1).intersection(l2))
def _adaptive_order_1(q, i, j, recons):
"""
First-order reconstruction.
First-order reconstruction is given by
.. math::
\hat{q}_{i + 1/2} = q_i
"""
recons[2 * i + 1] = q[j]
recons[2 * i + 2] = q[j]
return True | 1a49bed58094988b6e884427c63d3baf5daf1ae8 | 691,891 |
from enum import Enum
def VictoryEnum(ctx):
    """Victory Type Enumeration.

    NOTE(review): the file imports ``enum.Enum`` (stdlib), but this call —
    a context argument plus name=value keywords — matches the ``construct``
    library's Enum adapter, not the stdlib functional API, which would
    reject these keyword arguments. Confirm the intended import.
    """
    return Enum(
        ctx,
        standard=0,
        conquest=1,
        exploration=2,
        ruins=3,
        artifacts=4,
        discoveries=5,
        gold=6,
        time_limit=7,
        score=8,
        standard2=9,
        regicide=10,
        last_man=11
    )
def str_to_bool(str):
    """Convert a unicode string to equivalent boolean value and is case insensitive."""
    lowered = str.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError("{} in the app.config file is not a boolean value".format(str))
import textwrap
def get_notifiers_provider_config(message, subtitle, title) -> dict:
    """
    Return kwargs that will be passed to `notifiers.notify` method.
    """
    # different providers have different requirements for the `notify` method
    # most seem to take a `message` parameter, but they also have different
    # potential formatting requirements for messages.
    pushover_message = textwrap.dedent(
        f"""
        <i>{subtitle}</i>
        {message}
        """
    )
    slack_message = message if message else "task complete"
    return {
        "pushover": {
            "message": pushover_message,
            "title": title,
            "html": True,
        },
        "slack": {"message": slack_message},
    }
def rgb2hex(pix):
    """Given a tuple of r, g, b, return the hex value """
    red, green, blue = pix[:3]
    return f"#{red:02x}{green:02x}{blue:02x}"
def decode_uint256(s: bytes) -> int:
    """Decode 256-bit integer from little-endian buffer."""
    # Exactly 32 bytes are expected for a uint256.
    assert len(s) == 32
    value = int.from_bytes(s, 'little')
    return value
def bg(text, color):
    """Set text to background color."""
    # \33 is ESC; 48;5;<n> selects a 256-color background, \33[0m resets.
    return f"\33[48;5;{color}m{text}\33[0m"
import torch
def reshape_and_split_tensor(tensor, n_splits):
    """Reshape and split a 2D tensor along the last dimension.
    Args:
        tensor: a [num_examples, feature_dim] tensor. num_examples must be a
            multiple of `n_splits`.
        n_splits: int, number of splits to split the tensor into.
    Returns:
        splits: a list of `n_splits` tensors. The first split is [tensor[0],
            tensor[n_splits], tensor[n_splits * 2], ...], the second split is
            [tensor[1], tensor[n_splits + 1], tensor[n_splits * 2 + 1], ...], etc..
    """
    feature_dim = tensor.shape[-1]
    # Packing n_splits consecutive rows side by side lets a column slice
    # pick out every n_splits-th original row.
    merged = torch.reshape(tensor, [-1, feature_dim * n_splits])
    return [
        merged[:, split * feature_dim:(split + 1) * feature_dim]
        for split in range(n_splits)
    ]
def _join(words):
"""Join words into single line.
Args:
words: List of words.
Returns:
String with space separated words.
"""
return u' '.join(words) if words else u'' | 21707a9abebf03afc1cf81c7454ee172f1b40d04 | 691,908 |
def event_rank(count):
    """Determine event ranking."""
    # Ascending (exclusive upper bound, rank) pairs; first match wins.
    brackets = (
        (10, 'constable'),
        (100, 'sergeant'),
        (250, 'inspector'),
        (500, 'superintendent'),
        (1000, 'commander'),
    )
    for upper, rank in brackets:
        if count < upper:
            return rank
    return 'commissioner'
from typing import Counter
def get_vocab(training_set, vocab_size_threshold=5):
    """Build word<->id mappings from a nested training set.

    *training_set* is an iterable of dialogues, each a list of token lists.
    Tokens occurring fewer than *vocab_size_threshold* times are dropped.
    Ids 0-3 are reserved for the _GOO/_EOS/_PAD/_UNK special tokens.
    """
    counts = Counter()
    for dialogue in training_set:
        for sentence in dialogue:
            counts.update(sentence)
    # Counter preserves first-occurrence order, so ids are deterministic.
    kept = [token for token, freq in counts.items() if freq >= vocab_size_threshold]
    word2id = {"_GOO": 0, "_EOS": 1, "_PAD": 2, "_UNK": 3}
    id2word = {0: "_GOO", 1: "_EOS", 2: "_PAD", 3: "_UNK"}
    for token in kept:
        next_id = len(word2id)
        word2id[token] = next_id
        id2word[next_id] = token
    assert(len(word2id) == len(id2word))
    print("vocabulary size: %d" % len(word2id))
    return word2id, id2word
def tracking(gfftracking):
    """
    Parse a GffCompare gffcmp.tracking file into a dict.

    Only rows where all three pipelines matched are kept.

    :return: tcons_XXXX (key) : [transcript_id 1, transcript_id 2,
        transcript_id 3] (value)
    """
    tcons = {}
    with open(gfftracking) as handle:
        for raw_line in handle:
            fields = raw_line.split()
            entries = fields[4:]
            # '-' marks a pipeline with no matching transcript; keep only
            # rows where all three pipelines produced a match.
            if '-' in entries:
                continue
            tcons[fields[0]] = [entry.split('|')[1] for entry in entries]
    return tcons
def CreateLookUpTable(numStrands, lengthStrands):
    """
    Return a numStrands x lengthStrands grid (list of lists) initialized
    with empty strings, used as the scaffold lookup table.
    """
    table = []
    for _ in range(numStrands):
        # Strings are immutable, so sharing them between cells is safe;
        # each row is still a distinct list.
        table.append([''] * lengthStrands)
    return table
def scan_aggs(search, source_aggs, inner_aggs={}, size=10):
    """
    Iterate over every bucket combination of ``source_aggs``, yielding the
    result of ``inner_aggs`` for each. Uses the ``composite`` aggregation to
    page through the combinations.
    """
    def execute_page(**extra):
        # Search with size 0: we only care about the aggregation results.
        page = search[:0]
        page.aggs.bucket('comp', 'composite', sources=source_aggs, size=size, **extra)
        for agg_name, agg in inner_aggs.items():
            page.aggs['comp'][agg_name] = agg
        return page.execute()

    response = execute_page()
    while response.aggregations.comp.buckets:
        yield from response.aggregations.comp.buckets
        comp = response.aggregations.comp
        if 'after_key' in comp:
            cursor = comp.after_key
        else:
            # Older ES versions omit after_key; fall back to the last bucket.
            cursor = comp.buckets[-1].key
        response = execute_page(after=cursor)
def rwrap(some_string):
    """Return *some_string* wrapped in ANSI codes that render it red."""
    return f"\033[91m{some_string}\033[0m"
async def healthCheck():
    """
    Health-check endpoint body (HTTP 200) for AWS load-balancer probes.

    Note: returns the set literal ``{'ok'}``, matching the original
    endpoint's payload.
    """
    status = {'ok'}
    return status
def _to_int(timestamp):
"""Return the integral part of a timestamp.
Parameters:
timestamp -- NTP timestamp
Retuns:
integral part
"""
return int(timestamp) | f5e8dd7d58228a5d22d2a736341daf74fb020f79 | 691,927 |
def gft(s, psi):
    """Graph Fourier Transform (GFT).

    Args:
        s (N x d np.ndarray): graph signals, one per column.
        psi (N x N np.ndarray): graph Laplacian eigenvectors.

    Returns:
        N x d np.ndarray: the GFT of the signals (psi^T s).
    """
    return psi.T @ s
def CtoK(T_C):
    """Convert a temperature from degrees Celsius to Kelvin."""
    KELVIN_OFFSET = 273.15
    return T_C + KELVIN_OFFSET
def ensureUtf(s, encoding='utf8'):
    """Convert input to unicode text if necessary.

    If `s` is bytes (or a bytes subclass), it is decoded using `encoding`;
    undecodable sequences are dropped ('ignore'). Anything else is returned
    unchanged. Used for preprocessing /source/ and /filename/ arguments
    to the builtin function `compile`.
    """
    # isinstance replaces the exact `type(s) == bytes` check: it is the
    # idiomatic type test and also handles bytes subclasses, which the old
    # check silently passed through undecoded.
    if isinstance(s, bytes):
        return s.decode(encoding, 'ignore')
    return s
from datetime import datetime
def check(str1, str2, format_str):
    """Return True if both strings parse to equal datetimes under
    *format_str*, or are both redacted identically."""
    try:
        return datetime.strptime(str1, format_str) == datetime.strptime(str2, format_str)
    except ValueError:
        # At least one value is redacted/unparseable: accept only if both
        # are redacted the same way.
        return str1 == str2
def last_kstp_from_kper(hds, kper):
    """Find the last time step (kstp) of a stress period (kper) in a
    MODFLOW head save file.

    Args:
        hds (`flopy.utils.HeadFile`): head save file
        kper (`int`): zero-based stress period number

    Returns:
        **int**: zero-based last time step of stress period `kper`

    Raises:
        Exception: if `kper` does not appear in the file.
    """
    # hds.kstpkper holds one-based (kstp, kper) pairs.
    last_step = -1
    for step, period in hds.kstpkper:
        if period == kper + 1 and step > last_step:
            last_step = step
    if last_step == -1:
        raise Exception("kstp not found for kper {0}".format(kper))
    # convert one-based kstp to zero-based
    return last_step - 1
def describe_element(name, df):
    """ Takes the columns of the dataframe and builds a ply-like description
    Parameters
    ----------
    name: str
        Element name; 'face' gets the fixed vertex-index list property.
    df: pandas DataFrame
    Returns
    -------
    element: list[str]
    """
    property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
    element = ['element ' + name + ' ' + str(len(df))]
    if name == 'face':
        element.append("property list uchar int points_indices")
    else:
        # Iterate (column, dtype) pairs directly. The old code indexed
        # df.dtypes[i] with an integer position on a label-indexed Series,
        # which is deprecated in pandas 2.x and removed in pandas 3.0.
        for column, dtype in df.dtypes.items():
            # first letter of the dtype string ('f'/'u'/'i') picks the format
            f = property_formats[str(dtype)[0]]
            element.append('property ' + f + ' ' + column)
    return element
def get_sex(sex):
    """Return a consistent sex notation ('male'/'female'); any other value
    is returned lowercased."""
    normalized = sex.lower()
    return {'m': 'male', 'f': 'female'}.get(normalized, normalized)
def policy_rollout(agent, env, num_traj, horizon):
    """Roll out an agent in an environment to collect trajectories.

    Args:
        agent: the agent to roll out.
        env: the environment to roll out in.
        num_traj: number of trajectories to collect.
        horizon: maximum number of steps per trajectory.

    Returns:
        Tuple of (states, actions, rewards, observations), each a list of
        per-trajectory lists.
    """
    all_states, all_actions, all_rewards, all_observations = [], [], [], []
    for _ in range(num_traj):
        time_step = env.reset()
        states, actions, rewards, observations = [], [], [], []
        for _ in range(horizon):
            # MuJoCo specific operations.
            states.append(env._gym_env.get_state())  # pylint: disable=protected-access
            observations.append(time_step)
            action = agent.action(time_step.observation)
            actions.append(action)
            time_step = env.step(action)
            rewards.append(float(time_step.reward))
            if time_step.is_last():
                break
        all_states.append(states)
        all_actions.append(actions)
        all_rewards.append(rewards)
        all_observations.append(observations)
    return all_states, all_actions, all_rewards, all_observations
def pick_files(profile_dir, **kwargs):
    """
    Return paths to the profile files that should be backed up.

    The 17 backup-worthy files are organized into 11 categories:
    autocomplete, bookmarks, certificates, cookies, dictionary,
    download_actions, passwords, preferences, search_engines,
    site_settings, styles.

    All categories are included by default; prune one by passing it as a
    keyword argument set to False, e.g. ``cookies=False``. Only paths that
    actually exist are returned.
    """
    profile_files = {  # (no fold)
        'autocomplete': [
            'formhistory.sqlite',
        ],
        'bookmarks': [
            'places.sqlite',
            'bookmarkbackups',
        ],
        'certificates': [
            'cert8.db',
        ],
        'cookies': [
            'cookies.sqlite',
        ],
        'dictionary': [
            'persdict.dat',
        ],
        'download_actions': [
            'mimeTypes.rdf',
        ],
        'passwords': [
            'key3.db',
            'logins.json',
        ],
        'preferences': [
            'prefs.js',
            'user.js',
        ],
        'search_engines': [
            'search.json',
            'searchplugins',
        ],
        'site_settings': [
            'permissions.sqlite',
            'content-prefs.sqlite',
        ],
        'styles': [
            'chrome/userChrome.css',
            'chrome/userContent.css',
        ],
    }
    candidates = []
    for category, names in profile_files.items():
        # A category is included unless explicitly disabled via kwargs.
        if kwargs.get(category, True):
            candidates.extend(profile_dir / name for name in names)
    return [path for path in candidates if path.exists()]
def upstream_ids(id, fromtoseries, maxcycle=1e6):
    """Return all ids upstream of *id* given a from (index) to (values) map.

    Performs a breadth-first walk up the from->to mapping; raises
    RuntimeError if more than *maxcycle* levels are traversed (which
    indicates a circular mapping).
    """
    frontier = [id]
    collected = []
    levels = 0
    while frontier:
        parents = []
        for node in frontier:
            # all entries that flow into `node`
            parents.extend(list(fromtoseries[fromtoseries == node].index))
        collected.extend(parents)
        frontier = parents
        levels += 1
        if levels > maxcycle:
            raise RuntimeError('maxcycles reached. Circular fromto?')
    return collected
def document_order(node):
    """Compute a document-order value for a DOM node.

    cmp(document_order(a), document_order(b)) is -1, 0, or 1 when a is
    before, identical to, or after b in the document.

    Order is a list of sibling indexes: the third child of the document is
    [2]; its first child is [2, 0]. Attributes sort before all children of
    their element and are further ordered by name, e.g. [2, 0, -1, 'href'].
    """
    # Attributes: parent's order followed by (-1, attribute-name).
    if node.nodeType == node.ATTRIBUTE_NODE:
        order = document_order(node.ownerElement)
        order.extend((-1, node.name))
        return order
    parent = node.parentNode
    # The document root (hopefully) has no parent: empty order.
    if parent is None:
        return []
    # Count preceding siblings to find this node's position.
    position = 0
    sibling = node.previousSibling
    while sibling is not None:
        position += 1
        sibling = sibling.previousSibling
    order = document_order(parent)
    order.append(position)
    return order
def chained(fn):
    """Decorator making an instance method chainable.

    The wrapped method is run for its side effects and *self* is returned,
    enabling calls like ``user.unfollow().follow().unfollow()``.
    """
    def wrapper(self, *args, **kwargs):
        fn(self, *args, **kwargs)
        return self
    return wrapper
def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks):
    """
    Compute expected portfolio returns, assuming equal investment in each
    long/short stock.

    Parameters
    ----------
    df_long : DataFrame
        Top stocks for each ticker and date marked with a 1
    df_short : DataFrame
        Bottom stocks for each ticker and date marked with a 1
    lookahead_returns : DataFrame
        Lookahead returns for each ticker and date
    n_stocks: int
        Number of stocks chosen each month

    Returns
    -------
    portfolio_returns : DataFrame
        Expected portfolio returns for each ticker and date
    """
    # +1 for long positions, -1 for short, 0 otherwise
    positions = df_long - df_short
    return (lookahead_returns * positions) / n_stocks
from dateutil import tz
from datetime import datetime
def convertTimeToLocal(time: str):
    """Converts an ISO format UTC time to a local time.

    Args:
        time: UTC time to be converted, in ISO format. Any tz offset in the
            string is overwritten; the value is always treated as UTC
            (matching the original dateutil-based behavior).

    Returns:
        A timezone-aware datetime in the system's local timezone (per the
        POSIX TZ variable).
    """
    # stdlib replacement for dateutil.tz: timezone.utc == tz.gettz('UTC'),
    # and astimezone() with no argument converts to the local timezone.
    from datetime import timezone
    utc_dt = datetime.fromisoformat(time).replace(tzinfo=timezone.utc)
    return utc_dt.astimezone()
from typing import Optional
def _to_volts(value: Optional[float]):
"""Convert µV to V if not None."""
if value is None:
return None
return float(value) / 1e6 | 623b6824c7307352fa01f7c00502adab8a10c451 | 691,967 |
from bs4 import BeautifulSoup
import re
def get_last_episode(html_content):
    """Return the highest episode number available (int).

    Scans anchor tags whose text matches 'Episode' and extracts the largest
    integer token found. Returns 0 when no episode numbers are present.
    """
    # Pin the parser explicitly: relying on bs4's auto-detection makes the
    # result depend on which parsers happen to be installed (and warns).
    soup = BeautifulSoup(html_content, "html.parser")
    highest = 0  # renamed from `max`, which shadowed the builtin
    for link in soup.find_all('a', text=re.compile('Episode')):
        for token in link.string.split():
            if token.isdigit():
                number = int(token)
                if number > highest:
                    highest = number
    return highest
import hashlib
def sha_sum(fname):
    """Return the sha256 checksum of a file.

    Args:
        fname (str): Path to a file.

    Returns:
        str: The sha256 checksum as a hex string.
    """
    digest = hashlib.sha256()
    with open(fname, 'rb') as handle:
        # Read in 64 KiB chunks to keep memory bounded for large files.
        while True:
            block = handle.read(65536)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
def torrentfile_create(args, creator, name):  # pragma: no cover
    """Create a new .torrent file (intended to run in a separate thread).

    Parameters
    ----------
    args : `dict`
        keyword arguments for the torrent creator.
    creator : `type`
        class whose instances build and write the torrent file.
    name : `list`
        shared container to which the output path is appended.

    Returns
    -------
    `list`
        the same container, now holding the created torrent's path.
    """
    outfile, _ = creator(**args).write()
    name.append(outfile)
    return name
def mk_int(s):
    """
    Coerce a string to an int.

    :param s: String to convert (may also be None or a non-string).
    :return: int value of the stripped string; 0 for None, empty, or
             whitespace-only input; the input itself (stripped, for
             strings) when conversion is impossible.
    """
    if s is None:
        # The docstring always promised 0 for None; the old bare-except
        # path returned None instead.
        return 0
    try:
        s = s.strip()
        return int(s) if s else 0
    except (AttributeError, ValueError):
        # AttributeError: no .strip (non-string input);
        # ValueError: non-numeric string. Pass the value through, as before.
        return s
def kortti_välissä(kortti1, kortti2, kortti3):
    """Check whether the first card's rank is strictly between the other two.

    Parameters
    ----------
    kortti1, kortti2, kortti3 : tuple(int, str)
        Cards to compare, as (rank, suit) tuples.

    Returns
    -------
    True
        if kortti1's rank lies strictly between those of kortti2 and kortti3.
    """
    # Suit is irrelevant for the comparison.
    rank1, rank2, rank3 = kortti1[0], kortti2[0], kortti3[0]
    low, high = min(rank2, rank3), max(rank2, rank3)
    return low < rank1 < high
def trySurroundSelection(editor, char, map_table):
    """Try to surround the editor's selections using a mapping table.

    If *char* has an entry in *map_table*, every selection is replaced by
    ``map_table[char] % selection_text`` (the mapping value is a %-format
    template). Each selection of a multi-selection editor is surrounded
    separately.

    :param editor: editor where to try to do surrounding
    :type editor: :any:`eye.widgets.editor.Editor`
    :param char: the character triggering the surrounding
    :type char: str
    :param map_table: mapping from trigger chars to %-format templates
    :type map_table: dict[str, str]
    :returns: True if surrounding was performed, else False (usable as an
        event-filter return value).
    :rtype: bool
    """
    if char not in map_table:
        return False
    template = map_table[char]
    # Replacing text shifts (invalidates) all indexes after the edit, so
    # process selections back-to-front to avoid recomputing offsets.
    selections = [editor.getSelectionN(n) for n in range(editor.selectionsCount())]
    with editor.undoGroup(True):
        for selection in reversed(selections):
            editor.setSelection(*selection)
            selected = editor.selectedText()
            editor.replaceSelectedText(template % selected)
    return True
def squared_loss(y_hat, y):  #@save
    """Squared loss: 0.5 * (y_hat - y)**2, with y reshaped to match y_hat."""
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
def add(arg1, arg2):
    """
    Return the sum of the two arguments.
    """
    total = arg1 + arg2
    return total
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return the node that contains it.

    The `parent_node` property of the `Pointer` schema refers to the
    parents of the pointed-at `Node`, not of the `Pointer` itself, so the
    back-reference (`node__parent`) is used instead.
    """
    containers = pointer.node__parent
    assert len(containers) == 1, 'Pointer must have exactly one parent'
    return containers[0]
def read_file(file):
    """Return the entire contents of *file* as a string."""
    with open(file) as handle:
        return handle.read()
def linear_function(x, a, b):
    """ Evaluate the line a + b*x.

    Parameters:
        x: array
            The independent variable where the data is measured.
        a: float
            The linear (intercept) coefficient.
        b: float
            The angular (slope) coefficient.

    Returns:
        f: array
            The linear function evaluated at x.
    """
    return a + b * x
def rk4_ode(df_ds, x0, s, ds, *f_args, f1=None):
    """Advance x' = df_ds(x, s, *f_args) one step of size ds via classic RK4.

    An already-computed first slope may be passed as *f1* to avoid
    re-evaluating df_ds at (x0, s).
    """
    # https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
    k1 = df_ds(x0, s, *f_args) if f1 is None else f1
    k2 = df_ds(x0 + 0.5 * ds * k1, s + 0.5 * ds, *f_args)
    k3 = df_ds(x0 + 0.5 * ds * k2, s + 0.5 * ds, *f_args)
    k4 = df_ds(x0 + ds * k3, s + ds, *f_args)
    return x0 + (ds / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
def recolour(shape, source_colours, target_colour):
    """ Recolours cells of a shape whose colour is in source_colours to
    target_colour.

    >>> recolour([(0, 0, 1), (0, 1, 1), (0, 2, 1), (0, 3, 5)], [1], 4)
    [(0, 0, 4), (0, 1, 4), (0, 2, 4), (0, 3, 5)]
    >>> recolour([(0, 0, 1), (0, 1, 1), (0, 2, 2), (0, 3, 5)], [1, 2], 4)
    [(0, 0, 4), (0, 1, 4), (0, 2, 4), (0, 3, 5)]
    """
    return [
        (y, x, target_colour if colour in source_colours else colour)
        for y, x, colour in shape
    ]
import torch
def gaussian_stein_kernel(
    x, y, scores_x, scores_y, sigma, return_kernel=False
):
    """Compute the Gaussian Stein kernel between x and y.

    Parameters
    ----------
    x : torch.tensor, shape (n, p)
        Input particles
    y : torch.tensor, shape (n, p)
        Input particles
    scores_x : torch.tensor, shape (n, p)
        The score of x
    scores_y : torch.tensor, shape (n, p)
        The score of y
    sigma : float
        Bandwidth of the Gaussian base kernel
    return_kernel : bool
        whether the base kernel k(xi, yj) should also be returned

    Return
    ------
    stein_kernel : torch.tensor, shape (n, n)
        The Stein kernel matrix
    kernel : torch.tensor, shape (n, n)
        The base Gaussian kernel, only returned if return_kernel is True
    """
    _, dim = x.shape
    pairwise_diffs = x[:, None, :] - y[None, :, :]
    sq_dists = (pairwise_diffs ** 2).sum(axis=-1)
    # Base Gaussian kernel k(xi, yj) = exp(-||xi - yj||^2 / (2 sigma)).
    base_kernel = torch.exp(-sq_dists / sigma / 2)
    score_inner = scores_x.mm(scores_y.T)
    score_diffs = scores_x[:, None, :] - scores_y[None, :, :]
    cross_term = (pairwise_diffs * score_diffs).sum(axis=-1)
    # Second-derivative (trace) contribution of the Gaussian kernel.
    trace_term = dim - sq_dists / sigma
    stein_kernel = base_kernel * (
        score_inner + cross_term / sigma + trace_term / sigma
    )
    if return_kernel:
        return stein_kernel, base_kernel
    return stein_kernel
def queue_text(state):
    """Returns a block of text describing a given song queue.

    The first line summarizes the queue length and the currently playing
    index; each following line describes one queued song. An empty queue
    yields a fixed message instead.
    """
    # (removed a dead `index = 0` local: the comprehension below binds its
    # own `index` from state.playlist.items(), so the assignment was never
    # read.)
    if len(state.playlist) > 0:
        message = [f"{len(state.playlist)} songs in queue. Currently playing {state.current_index}"]
        message += [
            f"  {index}. **{song.title}** (requested by **{song.requested_by.name}**)"
            for (index, song) in state.playlist.items()
        ]  # add individual songs
        return "\n".join(message)
    else:
        return "The play queue is empty."
def get_digit_num(num: int) -> int:
    """
    Return the sum of the decimal digits of a non-negative integer.
    """
    total = 0
    while num:
        num, digit = divmod(num, 10)
        total += digit
    return total
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.