content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def rk2(y, f, t, h):
    """Advance one step of the explicit midpoint (RK2) method.

    Evaluates the slope at the start of the step, uses it to estimate the
    state at the interval midpoint, then applies the midpoint slope over
    the full step of size ``h``.
    """
    slope_start = f(t, y)
    slope_mid = f(t + 0.5*h, y + 0.5*h*slope_start)
    return y + h*slope_mid
import requests
def url_ok(url):
    """
    Check if server at remote URL answers ok

    Issues a HEAD request (no body download) and reports whether the
    server responded with HTTP 200.
    """
    response = requests.head(url)
    return response.status_code == 200
import re
def make_name_safe(project_name):
    """
    Converts a name with underscores into a valid class name (PascalCase).
    E.g. test_project will become TestProject
    :param project_name: the name we want to convert
    :type project_name: str
    :return: the PascalCase version of the name
    :rtype: str
    """
    # str.split is sufficient here (no regex needed) and the manual
    # accumulation loop collapses to a single join over capitalized words.
    return ''.join(word.capitalize() for word in project_name.split('_'))
def A1ToXy(space):
    """Convert user-friendly coordinates (like A1 or B5) to (x, y)
    coordinates (like 0,0 or 1,4)."""
    column_letter = space[0]
    row_digits = space[1:]
    # Column letter maps to a zero-based x offset from 'A';
    # the 1-based row number becomes a zero-based y.
    x = ord(column_letter) - ord('A')
    y = int(row_digits) - 1
    return (x, y)
def emission_objective(model):
    """
    The emission objective calculates the total emissions.
    Total emissions = sum over (year, supplier, block) of
    energy bought * load hours * carbon emissions per MWh.
    :param model: model reference
    :return: total emissions
    """
    return sum(
        model.buy[year, supplier, block]
        * model.carbon_emissions[supplier]
        * model.load_hours[block]
        for year in model.YEARS
        for supplier in model.SUPPLIER
        for block in model.BLOCKS
    )
def common_cols(df1, df2):
    """Return the intersection of common column names of the two frames
    (order is unspecified, as with any set intersection)."""
    shared = set(df1.columns).intersection(df2.columns)
    return list(shared)
def date_remove_dashes(std_date):
    """STD_DATE is a date in string form with dashes. Removes dashes for storage in JSON."""
    # Slice around the two fixed dash positions of YYYY-MM-DD.
    year = std_date[0:4]
    month = std_date[5:7]
    rest = std_date[8:]
    return year + month + rest
def to_interval(points: list):
    """ Transforms the set of points into set of intervals - Orthogonal hull
    Args:
        points (list of tuples): which are the points
    Returns:
        list of [min, max] pairs, one per dimension
    Example:
        [(0, 2), (1, 3)]  --> [[0, 1], [2, 3]]
        [(0, 2), (1, 5), (4, 3)] --> [[0, 4], [2, 5]]
        [(0, 2, 9), (1, 5, 0), (4, 3, 6)] --> [[0, 4], [2, 5], [0, 9]]
    """
    # zip(*points) transposes the point list into per-dimension tuples,
    # so the manual min/max scan reduces to the builtins.
    return [[min(coords), max(coords)] for coords in zip(*points)]
def setup_method_options(method, tuning_options):
    """ prepare method specific options

    Builds the kwargs dict passed to the scipy-style minimizer for the
    given method name, mapping the generic tuning options onto each
    method's own option names.
    """
    options = {}
    # Note that not all methods interpret maxiter in the same manner
    strategy_options = tuning_options.strategy_options
    maxiter = strategy_options.maxiter if "maxiter" in strategy_options else 100
    options['maxiter'] = maxiter
    if method in ("Nelder-Mead", "Powell"):
        options['maxfev'] = maxiter
    elif method == "L-BFGS-B":
        options['maxfun'] = maxiter
    # pass eps to methods that support it
    if method in ("CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"):
        options['eps'] = tuning_options.eps
    elif method == "COBYLA":
        options['rhobeg'] = tuning_options.eps
    # not all methods support 'disp' option
    if method not in ('TNC',):
        options['disp'] = tuning_options.verbose
    return options
def local_time(time):
    """Convert the given (e.g. UTC) time to the local system time zone.
    Args:
        time: A `datetime.datetime` object.
    Returns:
        A `datetime.datetime` object with local (system) time zone.
    """
    # astimezone() with no argument targets the system's local zone.
    return time.astimezone()
def search(state, path):
    """Get value in `state` at the specified path, returning {} if the key is absent"""
    trimmed = path.strip("/")
    if not trimmed:
        # Empty path (or only slashes) refers to the whole state.
        return state
    for segment in trimmed.split("/"):
        if segment not in state:
            return {}
        state = state[segment]
    return state
def has_el(el, el_name):
    """Return True if an element with a given name exists in the branch rooted at el.

    :param el: an lxml-style element exposing ``xpath()``
    :param el_name: tag name to search for anywhere below ``el``
    """
    # bool() on the result list replaces the redundant `True if ... else False`.
    return bool(el.xpath(f'.//{el_name}'))
def get_dict_list(data_array):
    """Returns a list of dictionaries based on the column headers
    (the 0th line in the column headers).

    :param data_array: rows of values; row 0 holds the keys
    :return: one dict per data row, mapping header -> cell value
    """
    # The original loop carried an unused enumerate index and dead
    # commented-out float-conversion code; dict(zip(...)) expresses the
    # pairing directly (and tolerates ragged rows by truncating).
    key_list = data_array[0]
    return [dict(zip(key_list, line)) for line in data_array[1:]]
from datetime import datetime
def convert_str_to_date(date: str):
    """
    Function to convert a date in a string format into a datetime YYYY/MM/DD.
    Accepts either "YYYY/MM/DD" or "YYYY-MM-DD".
    :param date: (str) Date in a string format
    :return: (datetime.date) date parsed from the string. The time will always be 0.
    :raises ValueError: if the string matches neither supported format
    """
    # Trying each format in turn replaces the nested try/except whose
    # inner `raise error` was a redundant re-raise.
    last_error = None
    for fmt in ("%Y/%m/%d", "%Y-%m-%d"):
        try:
            return datetime.strptime(date, fmt).date()
        except ValueError as error:
            last_error = error
    raise last_error
def _get_date_fields(date):
""" This function converts a datetime object to a map object contaning the date.
Args:
date: Type datetime.
Returns:
map object in the format {'year':int, 'month':int, 'day':int}.
"""
return {'year': date.year, 'month': date.month, 'day': date.day} | 47887e57a610ae94a8b7618e53c477357bcffe36 | 692,036 |
from typing import Any
def maybelen(value: Any) -> int:
    """A "maybified" version of the len() function.

    NOTE(review): despite the name, this currently just delegates to
    ``len()`` and will raise TypeError for unsized values — confirm
    whether a fallback (e.g. 0) was intended.
    """
    return len(value)
import re
def pulse_factory(cls, name=None, **kwargs):
    """Returns a function that creates an instance of the given pulse class.
    Keyword arguments are passed to ``cls.__init__()``.
    Args:
        cls (type): Subclass of Pulse of which to create an instance.
        name (optional, str): Name of the resulting pulse. If None,
            will use a snake-case version of the class name,
            e.g. 'GaussianPulse' -> 'gaussian_pulse'. Default: None.
    Returns:
        callable: A function that takes no arguments and returns
        an instance of ``cls``.
    """
    if name is None:
        # turn 'GaussianPulse' into 'gaussian_pulse'
        words = re.findall("[a-zA-Z][^A-Z]*", cls.__name__)
        name = "_".join(words).lower()

    def factory():
        return cls(name=name, **kwargs)

    return factory
def _in_directories(filename, dirs):
"""Tests whether `filename` is anywhere in any of the given dirs."""
for dirname in dirs:
if (filename.startswith(dirname)
and (len(filename) == len(dirname) or filename[len(dirname)] == '/')):
return True
return False | fc93e4bef45a4364446c0daa3e5969f143fbacc4 | 692,039 |
import random
def gen_sequence(length):
    """
    Generates a test sequence to hash of size length
    :param length: size of sequence
    :return: bytes sequence of length length
    """
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f']
    # One random.choice call per character, same as the original draw order.
    chars = [random.choice(alphabet) for _ in range(length)]
    return ''.join(chars).encode()
def _float_parameter(level: float, maxval: float):
"""Helper function to scale a value between ``0`` and ``maxval`` and return as a float.
Args:
level (float): Level of the operation that will be between [0, 10].
maxval (float): Maximum value that the operation can have. This will be scaled to
level/10.
Returns:
float: The result from scaling ``maxval`` according to ``level``.
"""
return float(level) * maxval / 10. | b13f0fe99f921997f4b54b93f3d2649c2ea8253a | 692,043 |
import string
def _get_symbol(i):
"""Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to
51."""
if i >= len(string.ascii_letters):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(string.ascii_letters)} wire cuts to a circuit"
)
return string.ascii_letters[i] | 65197c4d43792e52db9c596f40fe3802291e256b | 692,044 |
def per_iter(per_iter, n_iter, result, header):
    """ Optionally modifies speed functions to produce per-iteration results.
    Args:
        per_iter: Whether or not to do the modification.
        n_iter: The number of iterations.
        result: The speed estimate.
        header: The unmodified header.
    Returns:
        result: result / n_iter (when per_iter is set)
        header: With "per iteration" appended (when per_iter is set).
    """
    # Guard against division by zero when no iterations were recorded.
    divisor = 1 if n_iter == 0 else n_iter
    if per_iter:
        result = result / divisor
        header = header + " per iteration"
    return result, header
def get_filenames(i):
    """Returns the filepaths for the output MusicXML and .png files.
    Parameters:
        - i: unique identifier for the score
    Returns:
        - (sheet_png_filepath, musicxml_out_filepath)
    """
    prefix = "dataset/"
    sheet_path = f"{prefix}{i}-sheet.png"
    musicxml_path = f"{prefix}{i}-musicxml.xml"
    return (sheet_path, musicxml_path)
def split_list_by(lst, key):
    """
    Splits a list by the callable *key* where a negative result will cause the
    item to be put in the first list and a positive into the second list.
    """
    negatives, positives = [], []
    for element in lst:
        bucket = positives if key(element) else negatives
        bucket.append(element)
    return (negatives, positives)
def create_padding_block(sizeOfPaddingAndHeaderInBytes):
    """
    Creates an analog UXG binary padding block with header. The padding block
    is used to align binary blocks as needed so each block starts on a 16 byte
    boundary. This padding block is also used to align PDW streaming data on
    4096 byte boundaries.
    Args:
        sizeOfPaddingAndHeaderInBytes (int): Total size of resulting padding
            binary block and header combined.
    Returns:
        list of bytes chunks: [block id, reserved, filler size, filler bytes]
    """
    HEADER_SIZE = 16  # padding header is fixed at 16 bytes
    filler_size = sizeOfPaddingAndHeaderInBytes - HEADER_SIZE
    block_id = (1).to_bytes(4, byteorder='little')
    reserved = (0).to_bytes(4, byteorder='little')
    filler_len = filler_size.to_bytes(8, byteorder='little')
    # Zero-filled payload sized so header + payload hits the requested total.
    filler = (0).to_bytes(filler_size, byteorder='little')
    return [block_id, reserved, filler_len, filler]
import json
def load_json_hyper_params(file):
    """
    Loads hyper_parameters dictionary from .json file
    :param file: string, path to .json file
    :return: dict, hyper-paramters.
    """
    with open(file, mode='r') as handle:
        hyper_params = json.load(handle)
    # Report how many entries were loaded for quick sanity checking.
    print('Read %d hyperparameters from %s' % (len(hyper_params.keys()), file))
    return hyper_params
def truncate_string(s, limit=140):
    """Truncate a string, replacing characters over the limit with "...".

    Tries to break at a space near the limit; otherwise cuts mid-word.
    """
    if len(s) <= limit:
        return s
    # Look for a space in the window [limit-20, limit-3) so the ellipsis
    # still fits within the limit.
    cut = s.rfind(' ', limit - 20, limit - 3)
    if cut < 0:
        cut = limit - 3
    return s[:cut] + "..."
def snippet(func):
    """Mark ``func`` as a snippet example function."""
    setattr(func, '_snippet', True)
    return func
def remove_disambiguation(doc_id):
    """
    Normalizes and removes disambiguation info from a document ID.

    Underscores become spaces, the '-COLON-' escape becomes ':', and any
    trailing '-LRB-...' disambiguation suffix (plus the separator before
    it) is stripped.
    """
    normalized = doc_id.replace('_', ' ').replace('-COLON-', ':')
    marker = normalized.find('-LRB-')
    if marker != -1:
        normalized = normalized[:marker - 1]
    return normalized
def map_to_slurm(corsika_dict, mapping):
    """Map a dictionary's CORSIKA run keys to SLURM task keys using mapping.

    :param corsika_dict: dict keyed by CORSIKA run number
    :param mapping: dict from CORSIKA run number to SLURM task key
    :return: new dict with the same values re-keyed by SLURM task key
    """
    # Bug fix: `.iteritems()` is Python 2 only and raises AttributeError
    # on Python 3 — use `.items()`.
    return {mapping[run_no]: content for run_no, content in corsika_dict.items()}
def calculate_proportional_dimensions(display, image):
    """Calculate proportional dimensions for given image based on display
    dimensions
    """
    # Width the image would need if scaled to fill the display height,
    # and height if scaled to fill the display width.
    width_for_full_height = int(display.resolution_h * image.width / image.height)
    height_for_full_width = int(display.resolution_w * image.height / image.width)
    if height_for_full_width < display.resolution_h:
        # Width-constrained scaling leaves the display height unfilled,
        # so scale to the display height instead.
        return (width_for_full_height, display.resolution_h)
    # Common case: scale to the display width.
    return (display.resolution_w, height_for_full_width)
import logging
def local_file_exists(filename):
    """
    Check if local file exists.
    :param filename: String
    :return: True if file exists, else False
    """
    # Probe by opening rather than stat-ing; a missing file is logged
    # and reported as False. Other OSErrors still propagate.
    try:
        handle = open(filename)
    except FileNotFoundError as err:
        logging.error(err)
        return False
    with handle:
        return True
import re
def clean_whitespace(text):
    """
    Remove any extra whitespace and line breaks as needed.
    """
    # Turn line breaks and tabs into plain spaces first.
    for ch in ('\n', '\r', '\t'):
        text = text.replace(ch, ' ')
    # Trim the ends, then collapse runs of spaces to a single space.
    return re.sub(' +', ' ', text.strip())
import ipaddress
import re
def uncompress(addr):
    """Return the uncompressed form of an address, adding leading zeroes.

    IPv4 addresses get zero-padded 3-digit octets ("10.1.2.3" ->
    "010.001.002.003"); IPv6 addresses are returned in exploded form.
    """
    ip = ipaddress.ip_interface(addr)
    # Use the `version` attribute instead of regexing the class name.
    if ip.version == 4:
        return ".".join("%03d" % int(octet) for octet in str(ip.ip).split("."))
    if ip.version == 6:
        return ip.ip.exploded
def getAbortState(*args):
    """Access protected class data of the system Abort State. This approach is a secure method to access
    saved system state values. If an abort is tripped, the test stand control system will automatically
    enter 'Safe' mode and valve configuration
    Arguments
    ---------
    args[0] : ucl --> UpperClass Instance
    Returns
    -------
    abort_state : Abort State Integer
        state[0] = system is nominal
        state[1] = system abort
    """
    upper_class = args[0]
    return upper_class.Controls.AbortState.abort_state
def get_current_connection_name(context):
    """
    Return the name of the current connection or None if there is no
    current connection
    """
    server = context.pywbem_server
    if server:
        return server.name
    return None
def json_parts(json_line):
    """ Checks for "label" and "description" components of Json command.
    Parameters:
        json_line (str): The Json command being passed into the function.
    Returns:
        Arr [Bool, Bool]: Is there a label command? Is there a description
        command?
    """
    has_label = "\"label\"" in json_line
    has_description = "\"description\"" in json_line
    return [has_label, has_description]
import json
def to_json(config):
    """Converts a JSON-serializable configuration object to a JSON string.

    Objects exposing a callable ``to_json`` serialize themselves; anything
    else goes through ``json.dumps``.
    """
    serializer = getattr(config, "to_json", None)
    if callable(serializer):
        return serializer(indent=2)
    return json.dumps(config, indent=2)
import warnings
def _label_compcor(confounds_raw, compcor_suffix, n_compcor):
"""Builds list for the number of compcor components."""
compcor_cols = []
for nn in range(n_compcor + 1):
nn_str = str(nn).zfill(2)
compcor_col = compcor_suffix + "_comp_cor_" + nn_str
if compcor_col not in confounds_raw.columns:
warnings.warn(f"could not find any confound with the key {compcor_col}")
else:
compcor_cols.append(compcor_col)
return compcor_cols | 437cfb03a51e26bd94cd11786eb05df202771c6e | 692,094 |
def extractDidParts(did, method="dad"):
    """
    Parses and returns keystr from did
    raises ValueError if fails parsing

    Expected format: "did:<method>:<keystr>".
    """
    parts = did.split(":")
    if len(parts) != 3:  # correct did format pre:method:keystr
        raise ValueError("Invalid DID value")
    pre, meth, keystr = parts
    if pre != "did":
        raise ValueError("Invalid DID identifier")
    if meth != method:
        raise ValueError("Invalid DID method")
    return keystr
def generate_random_node_features(random,
                                  num_batches,
                                  num_nodes,
                                  num_node_features):
    """Generates a random node feature matrix
    Args:
        random: The NumPy random number generator to use.
        num_batches: The number of batches to generate (use 'None' for no batch
            dimension).
        num_nodes: The number of nodes to generate.
        num_node_features: The number of features per node.
    Returns:
        A NumPy array of shape (num_batches, num_nodes, num_node_features)
        with random values between -1 and 1.
    """
    if num_batches:
        shape = (num_batches, num_nodes, num_node_features)
    else:
        shape = (num_nodes, num_node_features)
    # rand() yields [0, 1); rescale to [-1, 1).
    return random.rand(*shape) * 2.0 - 1.0
def author_id_string(aob):
    """
    Produce a string representation of an author id
    :param aob: author object (mapping with "type" and "id" keys)
    :return: string representation of author id as "<type>: <id>"
    """
    id_type = aob.get("type")
    id_value = aob.get("id")
    return f"{id_type}: {id_value}"
def extract_digest_key_date(digest_s3_key):
    """Extract the timestamp portion of a manifest file.
    Manifest file names take the following form:
    AWSLogs/{account}/CloudTrail-Digest/{region}/{ymd}/{account}_CloudTrail \
    -Digest_{region}_{name}_region_{date}.json.gz

    The date is the 16 characters immediately before the fixed 8-character
    ".json.gz" suffix, hence the [-24:-8] slice.
    """
    key = digest_s3_key
    return key[-24:-8]
def get_intersection_area(box1, box2):
    """
    compute intersection area of box1 and box2 (both are 4 dim box coordinates in [x1, y1, x2, y2] format)
    """
    left = max(box1[0], box2[0])
    top = max(box1[1], box2[1])
    right = min(box1[2], box2[2])
    bottom = min(box1[3], box2[3])
    # Clamp to zero when the boxes do not overlap on an axis.
    width = max(0, right - left)
    height = max(0, bottom - top)
    return width * height
def hmsm_to_days(hour=0, min=0, sec=0, micro=0):
    """
    Convert hours, minutes, seconds, and microseconds to fractional days.
    Parameters
    ----------
    hour : int, optional
        Hour number. Defaults to 0.
    min : int, optional
        Minute number. Defaults to 0.
    sec : int, optional
        Second number. Defaults to 0.
    micro : int, optional
        Microsecond number. Defaults to 0.
    Returns
    -------
    days : float
        Fractional days.
    Examples
    --------
    >>> hmsm_to_days(hour=6)
    0.25
    """
    # Fold each unit into the next-larger one, ending in hours, then days.
    fraction = sec + (micro / 1.e6)
    fraction = min + (fraction / 60.)
    fraction = hour + (fraction / 60.)
    return fraction / 24.
import curses
def load_keys() -> dict:
    """
    Load all keyboard keys available to user in program
    Usage: KEYS['DOWN']
    Parameters:
        None
    Returns:
        KEYS (dict): Dictionary of references to curses keys
    """
    # Each entry maps an action name to every key code that triggers it
    # (curses special keys plus vi-style / mnemonic characters).
    bindings = {}
    bindings["ENTER"] = (curses.KEY_ENTER, ord('\n'), ord('\r'))
    bindings["SPACE"] = (32, ord(' '))
    bindings["UP"] = (curses.KEY_UP, ord('k'))
    bindings["DOWN"] = (curses.KEY_DOWN, ord('j'))
    bindings["RIGHT"] = (curses.KEY_RIGHT, ord('l'))
    bindings["LEFT"] = (curses.KEY_LEFT, ord('h'))
    bindings["PAUSE"] = (ord('p'), ord('P'))
    bindings["RESUME"] = (ord('r'), ord('R'))
    bindings["QUIT"] = (27, ord('q'), ord('Q'))
    return bindings
def pprTXRecord(rec):
    """Pretty print a TX record as "nsamples=<count>"."""
    return f"nsamples={rec.nsamples}"
def index_of_(string, sub, start, length):
    """:yaql:indexOf
    Returns an index of first occurrence sub in string beginning from start
    ending with start+length.
    -1 is a return value if there is no any occurrence.
    :signature: string.indexOf(sub, start, length)
    :receiverArg string: input string
    :argType string: string
    :arg sub: substring to find in string
    :argType sub: string
    :arg start: index to start search with, 0 by default
    :argType start: integer
    :arg length: length of string to find substring in
    :argType length: integer
    :returnType: integer
    .. code::
        yaql> "cabcdab".indexOf("bc", 2, 2)
        2
    """
    # Negative start counts from the end; negative length means
    # "to the end of the string".
    start = start + len(string) if start < 0 else start
    if length < 0:
        length = len(string) - start
    end = start + length
    return string.find(sub, start, end)
def ben_type(exp):
    """ Given a bencoded expression, returns what type it is.

    'i' prefix -> int, 'l' -> list, 'd' -> dict, a digit (length prefix)
    -> str. Anything else falls through and returns None.
    """
    leading = exp[0]
    if leading == "i":
        return int
    if leading == "l":
        return list
    if leading == "d":
        return dict
    if leading.isdigit():
        return str
def get_producer_map(ssa):
    """
    Return dict from versioned blob to (i, j),
    where i is index of producer op, j is the index of output of that op.
    """
    producer_map = {}
    for op_index, op in enumerate(ssa):
        # op[1] holds the op's output blob names.
        for out_index, name in enumerate(op[1]):
            producer_map[name] = (op_index, out_index)
    return producer_map
from typing import Dict
from typing import Tuple
import collections
def getBinsFromGenomeSize(
    genome_dict: Dict[str, int], bin_size: int
) -> Dict[Tuple[str, int, int], int]:
    """Create a dictionary contains all bins of the same size across the genome
    Attributes:
        bin_size: bin size (i.e. 5000)
        genome_dict: a dictionary contains chromosome sizes
    Return:
        A dictionary contains all bins and its index (start from 1)
    """
    bin_dict = collections.OrderedDict()
    next_index = 1
    for chrom, chrom_size in genome_dict.items():
        # 1-based bin starts; the last bin is clipped at the chromosome end.
        for bin_start in range(1, chrom_size, bin_size):
            bin_end = min(bin_start + bin_size - 1, chrom_size)
            bin_dict[(chrom, bin_start, bin_end)] = next_index
            next_index += 1
    return bin_dict
def check_multi_location(alignment, tags, log=None):
    """
    See if the read was mapped at multiple locations.
    if so, it returns True and can be counted in the optional log
    :param alignment: the read
    :param tags: alignment tags as dict
    :return: True when an 'XA' tag is present (read also flagged qc-fail)
    """
    if 'XA' not in tags:
        return False
    # Multi-mapped read: mark it as a QC failure and optionally log it.
    alignment.is_qcfail = True
    if log:
        log.multi(alignment)
    return True
def sign2binary(y, zero_as_plus=False):
    """
    Convert signs {-x,x} -> binary values {0,1}
    Parameters
    ----------
    y: np.array (n,c) float/int (-inf,inf)
    zero_as_plus: bool
        if True, convert 0 -> 1, else 0 -> 0
    Returns
    -------
    np.array (n,c) int {0,1}
    """
    positive_mask = (y >= 0) if zero_as_plus else (y > 0)
    return positive_mask.astype(int)
def build_collection_representation(model, description):
    """Enclose collection description into a type-describing block.

    The collection is keyed by the model class name; selfLink is a
    placeholder (not implemented yet).
    """
    return {
        model.__name__: description,
        "selfLink": None,  # not implemented yet
    }
def int2str(index):
    """Convert well index into a human readable address (e.g. 13 -> 'B2').

    The plate has 8 rows (A-H) of 12 columns each.
    """
    # Bug fix: `index / 12` is a float in Python 3 and cannot index a
    # string — integer division is required.
    row = u'ABCDEFGH'[index // 12]
    col = (index % 12) + 1
    return u'%s%s' % (row, col)
def compression_type_of_files(files):
    """Return GZIP or None for the compression type of the files.

    Note: an empty sequence vacuously yields 'GZIP' (all() of nothing).
    """
    every_file_gzipped = all(name.endswith('.gz') for name in files)
    return 'GZIP' if every_file_gzipped else None
import requests
import json
def _query_ID_converter(ext_id):
    """
    Converts ePMC ext_id into PMID , API description here - https://www.ncbi.nlm.nih.gov/pmc/tools/id-converter-api/
    Parameters
    ----------
    ext_id : String
        ePMC identifier used to retrieve the relevant entry. Format is prefix of 'PMC'
        followed by an integer.
    Returns
    -------
    response_json : dict
        json returned by the API containing the relevant information. Can be passed to
        :func:`~pyre.convert_PMCID_to_PMID`
    See Also
    --------
    * :func:`~pyre.convert_PMCID_to_PMID`
    """
    service_root_url = "https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/?ids="
    # Assemble query string: id, response format, then the tool/email
    # identification that NCBI requests of API clients.
    request_url = (
        service_root_url + ext_id
        + r"&format=" + "json"
        + r"&tool=" + "pyresid"
        + r"&email=" + "robert.firth@stfc.ac.uk"
    )
    r = requests.get(request_url)
    return json.loads(r.text)
import yaml
def parse_str(string: str):
    """Parse using yaml for true, false, etc.

    Falls back to returning the input unchanged when YAML parsing fails.
    """
    try:
        return yaml.safe_load(string)
    except Exception:
        return string
import pkg_resources
def get_data_file_path(rel_path):
    """
    Get the path to a data file. Normally these can be found in
    `openff/cli/data/` but are likely hidden somewhere else in site-packages
    after installation.
    Parameters
    ----------
    rel_path : str
        Name of the file (relative to the data dir) to find the full path of
    """
    return pkg_resources.resource_filename("openff.cli", "data/" + rel_path)
def dict_to_title(argmap):
    """ Converts a map of the relevant args to a title.

    Joins "key=value" pairs with underscores, skipping the "cmd" key.
    In Python 3 dict iteration order is insertion order, so the result
    is deterministic for a given mapping.
    """
    # Removed leftover debug print(argmap).
    return "_".join(k + "=" + v for k, v in argmap.items() if k != "cmd")
def get_table(content):
    """Get header (list) and body (2d list) from input string.

    Rows are tab-separated lines; rows shorter than the header are
    dropped, longer rows are truncated to the header width.
    """
    normalized = content.replace('\r', '')
    rows = [line.strip().split('\t') for line in normalized.split('\n')]
    header = rows[0]
    width = len(header)
    body = [row[:width] for row in rows[1:] if len(row) >= width]
    return header, body
def cagr(dff):
    """calculate Compound Annual Growth Rate for a series and returns a formated string"""
    start_bal = dff.iat[0]
    end_bal = dff.iat[-1]
    # Number of growth periods is one fewer than the number of samples.
    years = len(dff) - 1
    growth = (end_bal / start_bal) ** (1 / years) - 1
    return f"{growth:.1%}"
def measure_distance(cell1, cell2):
    """ This function maps distances in a cartesian plane of size 10X10 to a
        torus of the same size and then measures the Euclidean distance on
        the torus.
    """
    (x1, y1), (x2, y2) = cell1.location, cell2.location
    dx = abs(x1 - x2)
    dy = abs(y1 - y2)
    # On the torus the shorter way around is never more than half the
    # world size (5); wrap any longer axis distance.
    if dx > 5:
        dx = 10 - dx
    if dy > 5:
        dy = 10 - dy
    return (dx * dx + dy * dy) ** 0.5
import math
def pol2cart(rho, phi):
    """
    Convert from polar (rho, phi) to cartesian (x, y) coordinates. phi is in degrees.
    """
    phi_rad = math.radians(phi)
    return (rho * math.cos(phi_rad), rho * math.sin(phi_rad))
from typing import Any
import json
def compact_json(obj: Any) -> str:
    """Encode into JSON, but in a more compacted form."""
    # Drop the default spaces after ',' and ':' to minimize output size.
    compact_separators = (",", ":")
    return json.dumps(obj, separators=compact_separators)
def check_buffering_complete(torrent_client, params=None):
    """
    Check if buffering is complete
    :return: bool - buffering status

    `params` is accepted for interface compatibility but unused.
    """
    return torrent_client.is_buffering_complete
import torch
def decode(loc, dbox_list):
    """
    Convert default boxes (DBoxes) into bounding boxes (BBoxes) using
    the offsets predicted by the SSD model.
    Parameters
    ----------
    loc: [8732, 4]
        Offsets predicted by the SSD model, stored as
        [delta_cx, delta_cy, delta_width, delta_height].
    dbox_list: [8732, 4]
        Default boxes, stored as [cx, cy, width, height].
    Returns
    -------
    boxes : [8732, 4]
        Bounding boxes as [xmin, ymin, xmax, ymax].
    """
    # Apply offsets: shift the centers and scale the sizes
    # (0.1 / 0.2 are the standard SSD variance factors).
    centers = dbox_list[:, :2] + loc[:, :2] * 0.1 * dbox_list[:, 2:]
    sizes = dbox_list[:, 2:] * torch.exp(loc[:, 2:] * 0.2)
    boxes = torch.cat((centers, sizes), dim=1)
    # Convert [cx, cy, w, h] into [xmin, ymin, xmax, ymax] in place.
    boxes[:, :2] -= boxes[:, 2:] / 2  # (cx, cy) -> (xmin, ymin)
    boxes[:, 2:] += boxes[:, :2]      # (w, h) -> (xmax, ymax)
    return boxes
def safe_is_subclass(subclass, superclass) -> bool:
    """
    A clone of :func:`issubclass` that returns ``False`` instead of throwing a :exc:`TypeError`.
    .. versionadded:: 1.2
    """
    try:
        result = issubclass(subclass, superclass)
    except TypeError:
        # Non-class arguments make issubclass raise; treat them as "no".
        result = False
    return result
def countLines(filePath: str):
    """Count the number of lines in a text file.

    :param filePath: path of the file to read
    :return: number of lines in the file
    """
    # Fix: the original left the file handle open (resource leak);
    # a with-block guarantees it is closed.
    with open(filePath) as handle:
        return sum(1 for _ in handle)
def Q_deph(P_mass, r_dist, R):
    """
    Calculates the heat load of dephlegmator.
    Parameters
    ----------
    P_mass : float
        The mass flow rate of dist , [kg/s]
    R : float
        The reflux number [dimensionless]
    r_dist : float
        The heat vaporazation of dist, [J/kg]
    Returns
    -------
    Q_deph : float
        The heat load of dephlegmator, [W] , [J/s]
    References
    ----------
    Dytnersky, eq. 2.2, p. 45
    """
    # Total condensed flow is the distillate plus the refluxed portion.
    total_flow = P_mass * (R + 1)
    return total_flow * r_dist
def get_image(track_data_list, index_mapping, index):
    """ Returns an image from a series of track directories.
    Inputs:
        track_data_list -- List of TrackData objects.
        index_mapping   -- total num images by 2 array, with each row
                           containing a directory index and the image indices for
                           that directory only.
        index           -- Single index, less than total number of images in
                           all directories.
    """
    # Translate the global index into (which directory, index within it).
    dir_index, local_index = index_mapping[index]
    track = track_data_list[dir_index]
    return track.detection_image(local_index)
import hashlib
import json
def subscribe_sqs_queue(sns_client, topic_arn, queueobj):
    """
    Subscribe an SQS queue to a topic.
    This is convenience method that handles most of the complexity involved
    in using an SQS queue as an endpoint for an SNS topic. To achieve this
    the following operations are performed:
    * The correct ARN is constructed for the SQS queue and that ARN is
      then subscribed to the topic.
    * A JSON policy document is contructed that grants permission to
      the SNS topic to send messages to the SQS queue.
    * This JSON policy is then associated with the SQS queue using
      the queue's set_attribute method. If the queue already has
      a policy associated with it, this process will add a Statement to
      that policy. If no policy exists, a new policy will be created.
    :type sns_client: boto3 SNS client
    :param sns_client: The SNS client used to create the subscription.
    :type topic_arn: string
    :param topic_arn: The ARN of the new topic.
    :type queueobj: A boto3 SQS Queue object
    :param queueobj: The queue object you wish to subscribe to the SNS Topic.
    :return: The response dict from the SNS ``subscribe`` call.
    """
    # Legacy boto2 equivalent kept for reference:
    #q_arn = queue.arn
    q_arn = queueobj.attributes['QueueArn']
    # t = queue.id.split('/') # '/512686554592/exp-workflow-starter-queue' => exp-workflow-starter-queue
    # this is the boto3 equivalent, but `t` is unused
    # t = q_arn.rsplit(':', 1)[-1] # arn:aws:sqs:us-east-1:512686554592:exp-workflow-starter-queue => exp-workflow-starter-queue
    # Deterministic statement id derived from topic+queue so repeated calls
    # can detect an existing grant instead of duplicating it.
    sid = hashlib.md5((topic_arn + q_arn).encode('utf-8')).hexdigest()
    sid_exists = False
    # resp = sns_`client.subscribe(topic_arn, 'sqs', q_arn)
    resp = sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=q_arn)
    # Legacy boto2 policy retrieval kept for reference:
    #attr = queue.get_attributes('Policy')
    # if 'Policy' in attr:
    #     policy = json.loads(attr['Policy'])
    # else:
    #     policy = {}
    policy = queueobj.attributes.get('Policy', {})
    if policy:
        # Existing policy arrives as a JSON string; parse it into a dict.
        policy = json.loads(policy)
    if 'Version' not in policy:
        policy['Version'] = '2008-10-17'
    if 'Statement' not in policy:
        policy['Statement'] = []
    # See if a Statement with the Sid exists already.
    for s in policy['Statement']:
        if s['Sid'] == sid:
            sid_exists = True
    if not sid_exists:
        # Grant the SNS topic (matched via SourceArn) permission to send
        # messages to this queue.
        statement = {'Action': 'SQS:SendMessage',
                     'Effect': 'Allow',
                     'Principal': {'AWS': '*'},
                     'Resource': q_arn,
                     'Sid': sid,
                     'Condition': {'StringLike': {'aws:SourceArn': topic_arn}}}
        policy['Statement'].append(statement)
    #queue.set_attribute('Policy', json.dumps(policy))
    queueobj.set_attributes(Attributes={'Policy': json.dumps(policy)})
    return resp | aa3210dc908e2ec334154de40c1f944785dc0496 | 692,165 |
import math
def get_number_format(number_of_pages) -> str:
    """
    Get the correct number formatting for pdftoppm's output numbering. E.g. a file with 10-99 pages will have output
    images named '01.png, 02.png, 03.png'; a file with 100-999 pages will output '001.png, 002.png, 003.png'. We need
    to use the same formatting for reading the images back in (and scanning them)
    :param number_of_pages: The total number of pages
    :return: A format string (e.g. '{:03}') to use for formatting these page numbers into filenames
    """
    # Number of digits needed to represent the largest page number.
    digits = int(math.log10(number_of_pages)) + 1
    return "{:0" + str(digits) + "}"
def switch_inbox(conn_, inbox_, **kwargs):
    """Select a different mailbox (inbox folder) on an open IMAP connection.

    Returns the same connection object so calls can be chained.
    """
    conn_.select(inbox_)
    return conn_
def xround(x, divisor=1):
    """Round *x* to the nearest multiple of *divisor*.

    Parameters
    ----------
    x : float
        Number to round.
    divisor : float
        Number the result shall be a multiple of.

    Returns
    -------
    float
        `x` rounded to the closest multiple of `divisor`.
    """
    multiples = round(x / divisor)
    return multiples * divisor
def gc(s):
    """
    Return the percentage of the DNA sequence ``s`` composed of G+C.

    Only uppercase 'G' and 'C' characters are counted.  Raises
    ZeroDivisionError for an empty sequence (unchanged behaviour).
    """
    # Renamed the local: the original assigned to `gc`, shadowing the
    # function's own name inside its body.
    gc_count = s.count('G') + s.count('C')
    return gc_count * 100 / len(s)
def parse_boolean(s):
    """Takes a string and returns the equivalent as a boolean value."""
    normalized = s.strip().lower()
    if normalized in ("yes", "true", "on", "1"):
        return True
    if normalized in ("no", "false", "off", "0", "none"):
        return False
    raise ValueError("Invalid boolean value %r" % normalized)
def _format_result(result):
"""Format result into string for templating."""
# do not include decimal if it's 100
if result == 100:
return "100"
return "{:.1f}".format(result) | fd386b200dac72cd1db3c104b9dc41b774d3a7d0 | 692,178 |
def mime(mime):
    """
    Build a decorator that records *mime* as the preferred MIME type to
    be written in the HTTP response when returning the decorated
    function's result.
    """
    def attach(fn):
        fn.mime = mime
        return fn
    return attach
def remove_well_known_protos(filenames):
    """Remove "well-known" protos for objc and cpp.

    On those platforms we get these for free as a part of the protobuf
    runtime.  We only need them for nanopb.

    Args:
      filenames: A list of filenames, each naming a .proto file.

    Returns:
      The filenames with members of google/protobuf removed.
    """
    kept = []
    for filename in filenames:
        if 'protos/google/protobuf/' not in filename:
            kept.append(filename)
    return kept
import random
def greeting_response(text):
    """
    Return a random bot greeting if any word of *text* is a known user
    greeting; otherwise return None.
    """
    # Bot's possible greeting replies.
    bot_greetings = ['hallå', 'hej', 'Hej där']
    # Greetings the user may send (set for O(1) membership checks).
    user_greetings = {'hej', 'hejsan', 'hallå'}
    if any(word in user_greetings for word in text.split()):
        return random.choice(bot_greetings)
    return None
def TargetIndex(targetRow: int, targetColumn: int) -> int:
    """
        1   2   3   4   5   6
     ---+---+---+---+---+---+---+
      1 |  1   3   6  10  15  21
      2 |  2   5   9  14  20
      3 |  4   8  13  19
      4 |  7  12  18
      5 | 11  17
      6 | 16

    Cells are numbered along anti-diagonals; the cell's diagonal is
    row + column - 1, and its index is the diagonal's triangular number
    counted back up to this row.
    """
    diagonal = targetRow + targetColumn - 1
    triangular = diagonal * (diagonal + 1) // 2
    return triangular - (targetRow - 1)
import hashlib
import binascii
def hash_password(password, salt, rounds, algorithm):
    """
    Hash *password* with *salt* using the named *algorithm*.

    Supported algorithms: 'pbkdf2_sha256' (requires *rounds*) and legacy
    'md5' (*rounds* ignored).  Both return the bare hash in hex format
    (not a full hash string).  Returns None for an unsupported algorithm
    or a missing rounds value.

    NOTE(review): md5 is cryptographically broken; it appears to be kept
    only for verifying pre-existing legacy hashes.
    """
    if algorithm == 'pbkdf2_sha256':
        if rounds is None:
            # PBKDF2 is meaningless without an iteration count.
            return None
        derived = hashlib.pbkdf2_hmac('sha256',
                                      password.encode(), salt.encode(), rounds)
        return binascii.hexlify(derived).decode()
    if algorithm == 'md5':
        # Legacy scheme: salt is simply prepended, rounds is ignored.
        return hashlib.md5((salt + password).encode()).hexdigest()
    return None
def conseq(cond, true, false):
    """
    Behaves like the ternary operator: returns *true* when *cond* is
    truthy, otherwise *false*.
    """
    return true if cond else false
from bs4 import BeautifulSoup
import re
import html
def __getCompanyMetadata(parsed: BeautifulSoup) -> dict:
    """Extract company metadata from a parsed EDGAR filing page.

    Pulls the Standard Industrial Classification (SIC) code, the SIC
    type (i.e. description), the company location, the state of
    incorporation, and the end of the fiscal year by running regular
    expressions over the raw HTML of the identification section.

    Arguments:
        parsed {BeautifulSoup} -- Parsed HTML from company EDGAR filing.

    Returns:
        dict -- Company metadata with keys `sic`, `sic_type`, `location`,
        `incorporation_state`, and `fiscal_year_end`.
    """
    # Serialize the identification block so the regexes can scan raw HTML.
    ident_html = str(parsed.find('p', class_='identInfo'))

    # NOTE(review): each findall(...)[0] below assumes the EDGAR page
    # layout matches these patterns; a layout change raises IndexError.
    sic_code, sic_desc = re.compile(
        r'SIC.+?:.+?(\d+?)<\/a> -(.+?)<br').findall(ident_html)[0]
    location = re.compile(
        r'State location:.+?>(\w+?)<\/a>').findall(ident_html)[0]
    incorporation = re.compile(
        r'State of Inc\.:.+?>(\w+?)<\/strong>').findall(ident_html)[0]
    fiscal_year = re.compile(
        r'Fiscal Year End:.+?(\d{4})').findall(ident_html)[0]

    return {
        'sic': sic_code,
        # SIC description may contain HTML entities; decode and trim.
        'sic_type': html.unescape(sic_desc).strip(),
        'location': location.strip(),
        'incorporation_state': incorporation.strip(),
        # 'mmdd' -> 'mm-dd'
        'fiscal_year_end': fiscal_year[0:2] + '-' + fiscal_year[2:],
    }
def is_form_persisted(form):
    """
    Does the form have a model instance attached and it's not being added?
    e.g. The form is about an existing Subgoal whose data is being edited.
    """
    instance = form.instance
    if not instance:
        # No instance attached: nothing persisted.
        return False
    # _state.adding is True while the instance awaits its first save.
    return not instance._state.adding
def get_version(rel_path):
    """Given a path to a Python init file, return the version string."""
    with open(rel_path, "r") as source:
        # Iterate lazily; no need to materialize all lines.
        for line in source:
            if line.startswith("__version__"):
                quote = '"' if '"' in line else "'"
                return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
import lzma
def compress_lzma(data: bytes) -> bytes:
    """compresses data via lzma (unity specific)

    The current static settings may not be the best solution,
    but they are the most commonly used values and should therefore be
    enough for the time being.

    :param data: uncompressed data
    :type data: bytes
    :return: compressed data
    :rtype: bytes
    """
    ec = lzma.LZMACompressor(
        format=lzma.FORMAT_RAW,
        filters=[
            {"id": lzma.FILTER_LZMA1, "dict_size": 524288, "lc": 3, "lp": 0, "pb": 2, }
        ],
    )
    # BUG FIX: the bytes returned by ec.compress(data) were previously
    # discarded, silently truncating the stream whenever the input was
    # large enough for the compressor to emit data before flush().  Both
    # compress() and flush() output form the complete raw LZMA1 stream.
    body = ec.compress(data) + ec.flush()
    # 5-byte LZMA properties header: 0x5D encodes lc=3/lp=0/pb=2, then
    # the dict size 524288 (0x00080000) in little-endian.
    return b"]\x00\x00\x08\x00" + body
def relatice_percent_difference(x, y):
    """
    Calculate the relative percent difference of two numbers:
    |x - y| / avg(x, y) * 100

    NOTE(review): the name contains a typo ("relatice"); it is kept as-is
    because renaming would break existing callers.  Also note this raises
    ZeroDivisionError when x == -y (average is zero), unchanged from the
    original.
    """
    mean = abs(x + y) / 2
    return abs(x - y) / mean * 100
import re
def unixify_string(string):
    """
    Sanitize a string, making it nice for unix, by processing special
    characters.

    Removes: ( ) ! ? and commas.
    Replaces with underscores: apostrophes, '/' and spaces.

    Parameters
    ----------
    string : str
        the string to sanitize

    Returns
    -------
    str
        the sanitized string
    """
    stripped = re.sub('[()!?,]', '', string)
    underscored = re.sub("['/]", '_', stripped)
    return underscored.replace(' ', '_')
def getCounts(IDandRatingsTuple):
    """ Count the number of ratings for a movie.

    (The previous docstring said "Calculate average rating", but the
    function has always returned the rating count, as its name says.)

    Args:
        IDandRatingsTuple: a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...))
    Returns:
        tuple: a tuple of (MovieID, number of ratings)
    """
    movie_id, ratings = IDandRatingsTuple
    return (movie_id, len(ratings))
from typing import List
from typing import Any
def firstOrNone(list: List[Any]) -> Any:
    """Return the first element of *list*, or None if it is empty or not
    indexable (e.g. None was passed).

    Note: a falsey first element (such as 0 or '') is still returned
    as-is; only absence yields None.
    """
    try:
        return list[0]
    except (IndexError, TypeError, KeyError):
        # IndexError: empty sequence; TypeError: None / non-subscriptable;
        # KeyError: a mapping without key 0.  Narrowed from a bare
        # except:, which also swallowed unrelated errors (even
        # KeyboardInterrupt).
        return None
def numeric_validator(value):
    """Validator for numeric values: True if *value* can be parsed as a
    number, False otherwise.

    BUG FIX: the old expression ``isinstance(float(value), float) or ...``
    could never return False — float(value) either succeeded (making the
    first operand True) or raised, so non-numeric input crashed the
    validator instead of failing validation.
    """
    try:
        float(value)
    except (TypeError, ValueError):
        return False
    return True
def parse_message(message):
    """
    Parse a meme command of the form:
        !meme [meme name] [(optional) top text]; [(optional) bottom text]

    Returns:
        dict with keys 'template', 'top', 'bot' on success,
        False when the message has no '!meme' trigger or no meme name.
    """
    try:
        args = message.split('!meme')[1].split(';')
    except IndexError:
        # No '!meme' trigger present in the message.
        return False
    name_and_top = args[0].lstrip().split(' ')
    template = name_and_top[0]
    if not template:
        # '!meme' with no meme name.
        return False
    # BUG FIX: top text is optional per the usage string, but the old
    # code indexed [1] unconditionally, fell into a bare except, and
    # rejected valid '!meme name' messages.  (The debug print() was
    # removed as well.)
    top = name_and_top[1] if len(name_and_top) > 1 else ''
    bot = args[1] if len(args) > 1 else ''
    return {'template': template, 'top': top, 'bot': bot}
def parse_market(raw_market, sym_1, sym_2):
    """
    >>> raw_market = 'BTCETH'
    >>> parse_market(raw_market, 'ETH', 'BTC')
    ('BTC', 'ETH')
    >>> parse_market("QTMBTC", 'QTM', 'BTC')
    ('QTM', 'BTC')
    """
    # Both symbols must be present in the market string at all.
    if sym_1 not in raw_market or sym_2 not in raw_market:
        return None
    # Whichever symbol the string starts with comes first in the pair.
    if raw_market.startswith(sym_1):
        return (sym_1, sym_2)
    return (sym_2, sym_1)
def get_nested_compat_files(compat_api_versions):
    """Return __init__.py file paths for files under nested compat modules.

    A nested compat module contains two __init__.py files:
      1. compat/vN/compat/vK/__init__.py
      2. compat/vN/compat/vK/compat/__init__.py

    Args:
      compat_api_versions: list of compat versions.

    Returns:
      List of __init__.py file paths to include under nested compat modules.
    """
    files = []
    for outer in compat_api_versions:
        # Per outer version: first all plain inits, then all compat inits,
        # matching the original ordering.
        for inner in compat_api_versions:
            files.append("compat/v%d/compat/v%d/__init__.py" % (outer, inner))
        for inner in compat_api_versions:
            files.append(
                "compat/v%d/compat/v%d/compat/__init__.py" % (outer, inner))
    return files
def get_dispatch(data):
    """
    Return the record's dispatch type (field index 1).
    This value determines how the rest of the record should be parsed.
    """
    dispatch_type = data[1]
    return dispatch_type
def concatenated(lst, element):
    """
    Append *element* to *lst* in place and return the same list object.
    """
    lst += [element]
    return lst
def make_data(img_names: list, labels: list) -> list:
    """Format data appropriately for Pandas DataFrame.

    Args:
        img_names (list): Names of images.
        labels (list): Labels for images from dataset.

    Returns:
        list: One row per (image, label) pair: [image_name, *label_fields].
    """
    rows = []
    for idx, img_name in enumerate(img_names):
        # NOTE(review): .item() suggests names are 0-d tensors / numpy
        # scalars — confirm with the caller.
        name = img_name.item()
        for label in labels[idx]:
            rows.append([name] + list(label))
    return rows
import importlib
def module_available(module_name: str) -> bool:
    """check whether a python module is available

    Args:
        module_name (str): The name of the module

    Returns:
        `True` if the module can be imported and `False` otherwise
    """
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.