content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def validate_vector(obj, throwerr=False):
    """
    Check whether ``obj`` is a valid raw representation of a real
    mathematical vector: a Python list or tuple of 2 or 3 items.

    :param obj: Test subject
    :param throwerr: Raise an error if the check returns false.
    :return: True if obj is a valid raw representation of mathematical
        vectors, False otherwise
    """
    is_valid = isinstance(obj, (list, tuple)) and len(obj) in (2, 3)
    if not is_valid and throwerr:
        raise TypeError("A given object is not an accepted representation"
                        " of a vector (must be a Python list or tuple)")
    return is_valid
from typing import Callable
import inspect
def get_function_name(function: Callable) -> str:
    """Get the fully qualified name of a function as a string.

    The result is ``<module>.<qualname>``, so methods include their
    class (e.g. ``pkg.MyClass.method``).

    Parameters
    ----------
    function : Callable
        function to get the name from

    Returns
    -------
    name : str
    """
    # Bug fix: the original looped over the MRO and did
    # ``name += cls.__name__ + name``, which duplicated the accumulated
    # string on every hit. ``__qualname__`` already encodes the class
    # nesting correctly (PEP 3155).
    return function.__module__ + "." + function.__qualname__
def check_member_in_project(project, user):
    """
    Return True when the user is staff or one of the project's members.
    """
    return bool(user.is_staff or user in project.members.all())
def factor_n(n):
    """
    Decompose ``n`` as 2**exp * mult + 1, where ``mult`` is the greatest
    odd divisor of n - 1, and return (mult, exp).
    """
    assert n >= 3 and n % 2 != 0, "n must be an odd integer > 2"
    remainder = n - 1
    twos = 0
    while remainder % 2 == 0:
        remainder >>= 1
        twos += 1
    return remainder, twos
def get_sym(pair):
    """
    Reorder a (team, rival) tuple so the smaller element comes first —
    the canonical representation of a match without home-away status.
    """
    first, second = pair[0], pair[1]
    if first > second:
        return second, first
    return pair
def fromPoint3d(pt):
    """Convert a Point3d into an (x, y, z) tuple; None maps to None."""
    if pt is None:
        return None
    return pt.x, pt.y, pt.z
def acidity(NaOH_volume, NaOH_molarity, NaOH_fc, honey_solution_concentration, honey_solution_volume):
    """
    Compute the acidity of honey, in meq/kg, from NaOH titration data
    and the honey solution used.
    """
    milli_equivalents = NaOH_volume * NaOH_molarity * NaOH_fc
    honey_grams = (honey_solution_concentration * honey_solution_volume) / 100
    return (milli_equivalents * 1000) / honey_grams
def parse_node_coverage(line):
    # S s34 CGTGACT LN:i:7 SN:Z:1 SO:i:122101 SR:i:0 dc:f:0
    # "nodeid","nodelen","chromo","pos","rrank",assemb
    """
    Parse one S-line of the gaf alignment.

    Input: line from gaf alignment
    Output: tuple of (nodeid, nodelen, start_chromo, start_pos, rrank, coverage)
    """
    fields = line.strip().split()

    def tag_value(field):
        # tags look like "SN:Z:1" -> value is the third ':'-separated part
        return field.split(":")[2]

    return (
        fields[1],
        len(fields[2]),
        tag_value(fields[4]),
        tag_value(fields[5]),
        tag_value(fields[-2]),
        tag_value(fields[-1]),
    )
import re
def apply_formatting(locator, args):
    """Apply positional formatting to a locator template.

    Plain ``{}`` fields are filled positionally by ``.format``. Named
    fields (e.g. ``{title}``) are filled by popping values off ``args``
    in the order the names first appear, because named arguments are not
    supported in the calling syntax.

    Example:
        Given the locator "//*[a[@title='{title}'] or
        button[@name='{title}']]//{tag}" and args of ['foo', 'bar'],
        'foo' and 'bar' are popped off the argument list and bound to
        the kwargs keys 'title' and 'tag'.
    """
    named = {}
    for field in re.finditer(r"\{([^}]+)\}", locator):
        key = field.group(1)
        if key and key not in named:
            named[key] = args.pop(0)
    return locator.format(*args, **named)
def direction_name(angle):
    """
    Map a bearing in degrees onto a 16-point compass name.

    Example: direction_name(0.0) returns "N"
             direction_name(90.0) returns "E"
             direction_name(152.0) returns "SSE".
    """
    names = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S",
             "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
    step = 360. / len(names)
    return names[int(round(angle / step)) % len(names)]
def make_query(specific_table, offset):
    """
    Build a SELECT DISTINCT query for a table, optionally paged.

    :param specific_table: Name of table to retrieve data from.
    :param offset: Optional integer offset to start from.
    """
    sql = 'select DISTINCT * from `{}`'.format(specific_table)
    if isinstance(offset, int):
        # workaround for MySQL requiring a limit when using offset:
        # pass the documented "unlimited" row count.
        sql += " LIMIT {}, {};".format(offset, "18446744073709551615")
    return sql
def is_builtin_reducer(fn_reducer):
    """
    Return True when ``fn_reducer`` names the built-in reducer.
    """
    return fn_reducer == 'aggregate'
def format_code(entry):
    """
    Format code and error text for viewing by prefixing every line with
    a colored ``|G>>>|n`` marker.
    """
    prefixed = ["\n|G>>>|n %s" % line for line in entry.split('\n')]
    return "".join(prefixed).strip()
from typing import Union
def _is_dict(data: Union[dict, list]) -> bool:
"""
Checks whether provided data structure is a dictionary or not.
:param data: The data to check
:return: true if is dict, false otherwise
"""
if isinstance(data, dict):
return True
return False | 77ff0ac36c85700e0b9a32172ccd8f426419c848 | 685,719 |
def flatten(data):
    """Return a flattened version of a (possibly nested) list or tuple.

    Courtesy of https://stackoverflow.com/a/12472564

    Args:
        data (`tuple` or `list`): Input data

    Returns:
        `list`
    """
    if not data:
        return data
    head, tail = data[:1], data[1:]
    if type(head[0]) in (list, tuple):
        return list(flatten(head[0])) + list(flatten(tail))
    return list(head) + list(flatten(tail))
def clip(min, val, max):
    """
    Return ``val`` clamped to the closed interval [``min``, ``max``].
    """
    if val < min:
        return min
    if val > max:
        return max
    return val
def unique(lst):
    """
    Return a list made up of the unique values found in lst, preserving
    the order of first appearance (i.e. redundant values are removed).
    """
    seen = set()  # O(1) membership for hashable items
    unique_lst = []
    for item in lst:
        try:
            # Set membership is O(1) versus the O(n) list.count() scan
            # of the original implementation.
            is_new = item not in seen
            if is_new:
                seen.add(item)
        except TypeError:
            # Unhashable item (e.g. a list): fall back to a linear scan
            # so the original behavior is preserved.
            is_new = item not in unique_lst
        if is_new:
            unique_lst.append(item)
    return unique_lst
from typing import List
def linear_search(arr: List[int], target: int) -> int:
    """
    Scan ``arr`` front to back looking for ``target``.

    :param arr: Array of integers (may be sorted or unsorted)
    :param target: integer we want to find
    :return: 1-based position of the target in the array, -1 if not found
    """
    for position, value in enumerate(arr, start=1):
        if value == target:
            return position
    return -1
import yaml
def load_local_paths(path):
    """
    Load local paths .yaml file.

    Parameters
    ==========
    path : str
        Path to .env file.

    Returns
    =======
    local_paths : dict
        Dictionary with environmental variables from .yaml file, or
        None when the file cannot be parsed.
    """
    with open(path, 'r') as stream:
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as err:
            print(err)
            return None
def mass_from_column_name(mass):
    """Return the PVMassSpec mass 'M<x>' given the column name '<x>_amu' as string"""
    stem = mass[:-4]  # drop the trailing "_amu"
    return "M" + stem
def gram_align_right(s, line_width: int = 20):
    """
    Right-align text inside a Telegram monospace span.

    :param s: input text
    :param int line_width: Width
    :return: str
    """
    padded = str(s).rjust(line_width)
    return '`{}`'.format(padded)
import torch
from typing import Tuple
def get_idxs_tuple(idxs: torch.Tensor
                   ) -> Tuple[torch.Tensor, ...]:
    """
    Convert an index matrix into a tuple suitable for advanced indexing,
    eg ``tensor[idxs_tuple]``.

    @param idxs - Shape (num indices, num dimensions that the indices index into).
    @returns - A tuple of (num dimensions) tensors, each holding (num
        indices) elements.
    """
    transposed = idxs.transpose(0, 1)
    return tuple(transposed)
from typing import Set
from typing import Dict
def make_indices_to_labels(labels: Set[str]) -> Dict[int, str]:
    """ Creates a mapping from indices to labels, with "pad" at index 0. """
    ordered = ["pad"] + sorted(labels)
    return dict(enumerate(ordered))
def pstring(state, num):
    """Render a state triple and its count as an aligned string.

    Example:
    >>> pstring((0, -1, 1), 4)
    '( 0, *, 1) :  4'
    """
    a, b, c = state
    shown_b = " *" if b == -1 else b
    return f"({a:2},{shown_b:2},{c:2}) : {num:2}"
import requests
from typing import Optional
def size_from_headers(response: requests.Response) -> Optional[int]:
    """
    Return the size of the download based on the response headers.

    Arguments:
        response {requests.Response} -- the response

    Returns:
        Optional[int] -- the Content-Length as an int, or None when absent
    """
    content_length = response.headers.get("Content-Length")
    if content_length is None:
        return None
    return int(content_length)
def try_or_none(f):
    """Wrap single-argument ``f`` so it returns None when ``f`` raises.

    assumes f takes only one input
    """
    def f_or_none(x):
        try:
            return f(x)
        except Exception:
            # Bug fix: a bare ``except:`` also swallows KeyboardInterrupt
            # and SystemExit; only genuine errors become None.
            return None
    return f_or_none
def GetAllLengths(tbl_nrdb, nids):
    """retrieve all lengths for a set of nids.
    """
    return [tbl_nrdb.GetLength(nid) for nid in nids]
def find_lambda_jit(target, source):
    """
    Finds the longest subsequence of the target array,
    starting from index 0, that is contained in the source array.
    Returns the length of that subsequence + 1.
    i.e. returns the length of the shortest subsequence starting at 0
    that has not previously appeared.
    Args:
        target: NumPy array, perferable of type int.
        source: NumPy array, perferable of type int.
    Returns:
        Integer of the length.
    """
    # NOTE(review): the "_jit" suffix suggests this function is compiled
    # with a JIT (e.g. numba) elsewhere — the index-based loops are kept
    # deliberately JIT-friendly; confirm before restructuring.
    source_size = source.shape[0]-1
    target_size = target.shape[0]-1
    t_max = 0   # longest prefix match found over all source positions
    c_max = 0   # length of the match anchored at the current source index
    for si in range(0, source_size+1):
        if source[si] == target[0]:
            c_max = 1
            # Extend the match while consecutive elements agree.
            for ei in range(1,min(target_size, source_size - si+1)):
                if(source[si+ei] != target[ei]):
                    break
                else:
                    c_max = c_max+1
            # if(si+ei>=source_size):
            #     break
            if c_max > t_max:
                t_max = c_max
    # +1: length of the shortest prefix that has NOT previously appeared.
    return t_max+1
def getPlayer(f, columns, x):
    """Return the dataframe rows for the player whose subject ID is ``x``."""
    mask = f['Subject'].isin([x])
    return f[mask]
def _get_tmpdir_fixture_name(scope: str) -> str:
"""Get appropriately-scoped tmpdir fixture."""
if scope == 'session':
return 'session_tmpdir_path'
else:
return 'tmpdir_path' | d4ad8c41cda756d08f4afb52f3276e7c01379471 | 685,771 |
def strip_comments(s):
    """Strips comment lines and docstring from Python source string."""
    o = ''
    in_docstring = False
    for l in s.split('\n'):
        # Drop lines opening with a comment/quote and lines inside a
        # docstring.  The arithmetic below toggles the flag: it evaluates
        # to True exactly when one (not both) of "this line starts a
        # triple quote" / "already inside a docstring" holds.
        # NOTE(review): one-line docstrings ("""x""") and code after a
        # closing quote are not handled — confirm inputs avoid them.
        if l.strip().startswith(('#', '"', "'")) or in_docstring:
            in_docstring = l.strip().startswith(('"""', "'''")) + in_docstring == 1
            continue
        o += l + '\n'
    return o
def find_keywords_sentences(keywords, cleaned_lines):
    """
    Find the document lines mentioning at least one of the keywords,
    case-insensitively.

    :param keywords: The keywords to be searched in the lines
    :param cleaned_lines: The lines of the document
    :return: The de-duplicated lines with the given keywords
    """
    matches = [
        line
        for line in cleaned_lines
        for keyword in keywords
        if keyword.lower() in line.lower()
    ]
    return list(set(matches))
def resolveArgPrefix(function) -> str:
    """
    Resolve the command line prefix for a function.

    :param function: The function to resolve.
    :return: The value of the '_func' property set by @ArgMap (or the
        function name if not set), suffixed with ':'; an empty '_func'
        yields ''.
    """
    # Handle _func in argMap
    # Bug fix: the original seeded ``prefix`` with the colon already
    # appended (``__name__ + ':'``), so the default path returned
    # "name::"; seed with the bare name instead.
    prefix = function.__name__
    if hasattr(function, '__argMap__') and '_func' in function.__argMap__ and function.__argMap__['_func'] is not None:
        prefix = function.__argMap__['_func']
    if prefix != '':
        prefix += ':'
    return prefix
from bs4 import BeautifulSoup
import requests
def download_html(url: str) -> BeautifulSoup:
    """Download and parse the html tree at the given ``url``.

    Args:
        url (str): the url that users want to scrap

    Returns:
        a BeautifulSoup object
    """
    page = requests.get(url)
    page.encoding = "utf-8"
    return BeautifulSoup(page.text, "lxml")
def check_output_format(expected_formats):
    """
    Decorator for stream outputs that checks the format of the outputs
    after modifiers have been applied.

    :param expected_formats: The expected output formats
    :type expected_formats: tuple, set
    :return: the decorator
    """
    def output_format_decorator(func):
        def func_wrapper(*args, **kwargs):
            self = args[0]
            if self.output_format not in expected_formats:
                # Bug fix: the message previously hard-coded 'doc_gen'
                # instead of reporting the formats actually expected.
                raise ValueError("expected output format {}, got {}".format(
                    expected_formats, self.output_format))
            return func(*args, **kwargs)
        return func_wrapper
    return output_format_decorator
def get_hex(num, digits=2):
    """
    Convert an integer to an upper-case hex string of 'digit' characters.

    Args:
        num (int): The value to be converted.
        digits (int): The number of characters the final string should comprise. Default: 2.

    Returns:
        str: The hex string.
    """
    return format(num, "0{}X".format(digits))
from typing import Dict
from typing import Callable
from datetime import datetime
from typing import Tuple
def season_binner(rules: Dict[int, str]) -> Callable[[datetime], str]:
    """
    Construct mapping from datetime to a string in the form like 2010-06--P3M

    :param rules: Is a mapping from month (1-Jan, 2-Feb) to a string in the
        form "{month:int}--P{N:int}M", where ``month`` is a starting month
        of the season and ``N`` is a duration of the season in months.
    """
    # Pre-compute (label, year-offset) for every calendar month; a month
    # belonging to a season that started the previous year gets offset -1.
    month_rules: Dict[int, Tuple[str, int]] = {}
    for month in range(1, 13):
        season = rules.get(month, "")
        if season == "":
            month_rules[month] = ("", 0)
            continue
        start_month = int(season.split("--")[0])
        month_rules[month] = (season, -1 if start_month > month else 0)

    def label(dt: datetime) -> str:
        season, year_offset = month_rules[dt.month]
        if season == "":
            return ""
        return f"{dt.year + year_offset}-{season}"

    return label
from datetime import datetime
def l2l_dt(value):
    """
    Normalise ``value`` to a string formatted '%Y-%m-%d %H:%M:%S'.

    Accepts either a datetime object or an ISO-ish string
    ('%Y-%m-%dT%H:%M:%S'); any other type yields an apology message.
    """
    out_format = "%Y-%m-%d %H:%M:%S"
    if isinstance(value, datetime):
        return value.strftime(out_format)
    if isinstance(value, str):
        parsed = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
        return parsed.strftime(out_format)
    return "Hmm, seems something went wrong. We'll take a look at this ASAP"
    # Implement some sort of notifier here so devs know that a customer broke it :D
def _enum_to_int(value):
"""Convert an IntEnum member to a numeric value.
If it's not an IntEnum member return the value itself.
"""
try:
return int(value)
except (ValueError, TypeError):
return value | 4119801cf1b2ea891b1225e56aeec5a1d78bd4b1 | 685,790 |
def locate(actuator, *extra_args, **extra_kwargs):
    """
    Example target function for example-locate.

    Any parameter of an OpenC2-Command message can be used as an argument
    [action, actuator, args, id as cmd_id, target]; target will be the
    contents of the target object.

    :param actuator: the instance of the actuator that called the function
    :param extra_args: positional arguments passed to the function - list
    :param extra_kwargs: keyword arguments passed to the function - dict
    :return: OpenC2 response message - dict
    """
    return {
        'status': 400,
        'status_text': 'this is an example action, it returns this message',
    }
import struct
def pack_dint(n):
    """pack 32 bit into 4 bytes little endian"""
    packer = struct.Struct('<i')
    return packer.pack(n)
def truncate(toTruncate, charsToKeep=50):
    """
    Return a string truncated to 'charsToKeep' length plus ellipses;
    shorter strings pass through unchanged.
    """
    if len(toTruncate) <= charsToKeep:
        return toTruncate
    return toTruncate[:charsToKeep] + "..."
import math
def compute_distance(x1, y1, x2, y2):
    """
    Compute the Euclidean distance between two points.

    Inputs:
        x1: x-coordinate of point1
        y1: y-coordinate of point1
        x2: x-coordinate of point2
        y2: y-coordinate of point2
    Outputs:
        dist: distance between 2 points
    """
    # math.hypot is more robust than sqrt(dx**2 + dy**2): it avoids
    # intermediate overflow/underflow for very large or tiny coordinates.
    return math.hypot(x2 - x1, y2 - y1)
def mock_ingest_queue(mocker):
    """
    Mock the IngestRun queue.
    """
    return mocker.patch(
        "creator.ingest_runs.mutations.ingest_run.IngestRun.queue",
    )
import random
def random_subset(seq, n):
    """
    Return a set of n unique elements drawn at random from seq.

    Raises:
        ValueError: if seq contains fewer than n distinct values — the
            original implementation would loop forever in that case.
    """
    if len(set(seq)) < n:
        raise ValueError("seq does not contain %d unique elements" % n)
    res = set()
    while len(res) < n:
        x = random.choice(seq)
        res.add(x)
    return res
def rpad(ls, size, val):
    """Right-pads a list with a prescribed value to a set length."""
    padding = [val] * (size - len(ls))
    return ls + padding
def join_by_comma(iterable, key=None):
    """
    Helper to create a comma separated label out of an iterable.

    :param iterable: an iterable to be joined by comma
    :param key: if the iterable contains dicts, which key would you want to extract
    :return: comma separated string
    """
    if not key:
        return ", ".join(iterable)
    return ", ".join(item.get(key) for item in iterable)
def resolve_cyvcf2_genotype(cyvcf2_gt):
    """
    Translate a genotype given by cyvcf2 into a valid genotype string.

    Args:
        cyvcf2_gt (cyvcf2.variant.genotypes): [allele_1, allele_2, phased]

    Returns:
        genotype (str): e.g. "0/1", "1|1", "./." for missing alleles
    """
    # phased genotypes are joined by "|", unphased by "/"
    separator = "|" if cyvcf2_gt[2] else "/"
    # -1 encodes a missing allele, rendered as "."
    alleles = ["." if allele == -1 else str(allele) for allele in cyvcf2_gt[:2]]
    return alleles[0] + separator + alleles[1]
def find_start_end(case, control):
    """
    Find the measurement start and end of a control, treatment pair.

    :param case: The treatment ExperimentalCondition
    :param control: The control ExperimentalCondition (may be None)
    :return a [tuple]:
        - the start index point
        - the end index point
    """
    case_start = case.find_start_date_index()
    if control is None:
        return case_start, case.measurement_end
    start = max(case_start, control.measurement_start)
    end = min(case.measurement_end, control.measurement_end)
    return start, end
def is_in_prereg_group(user):
    """Determine whether a user is in the prereg_group.

    :param user: User wanting access to prereg material
    :return: True if prereg False if not
    """
    in_group = user.is_in_group('prereg_group')
    return in_group
def get_message_type(type_in_bytes):
    """Return the type of a binary message as a string."""
    return str(type_in_bytes, 'ascii')
import json
def get_r_version(path):
    """Get the version of R specified in the renv.lock file.

    Parameters
    ----------
    path : str
        Path to the project directory. The file path/renv.lock must exist.

    Returns
    -------
    str
        R version
    """
    with open(f"{path}/renv.lock", "r") as lockfile:
        lock_data = json.load(lockfile)
    return lock_data["R"]["Version"]
import string
def is_pangram(text: str) -> bool:
    """Determine if text is a pangram.

    ..note:: A pangram is a string that contains every single letter of
        the alphabet at least once (case is irrelevant).

    :param text: text to evaluate
    :return: True if text is a pangram
    """
    letters_present = set(text.lower())
    return set(string.ascii_lowercase).issubset(letters_present)
from typing import Union
from typing import Mapping
from typing import Sequence
from typing import Any
def get_nested(obj: Union[Mapping, Sequence], path: Sequence) -> Any:
    """Get element of a sequence or map based on multiple nested keys.

    Args:
        obj: Object to index from
        path: Sequence of nested keys.

    Example:
        >>> get_nested({'a': {'b': [1, 2]}}, ['a', 'b', 0])
        1
    """
    current = obj
    visited = []
    for key in path:
        visited.append(key)
        try:
            current = current[key]
        except (KeyError, IndexError, TypeError) as e:
            raise type(e)(str(e.args[0]) + " at nested path: {}".format(visited))
    return current
import gzip
import json
def load_gzipped_jsonl(filename: str, encoding: str = 'UTF-8') -> dict:
    """
    Load data stored as gzipped JSON Lines into one dictionary.

    Parameters
    ----------
    filename : str
        Path to the file.
    encoding : str, default = 'utf-8'

    Returns
    -------
    : dict
        Python dictionary with unique records.
    """
    records = {}
    with gzip.open(filename, 'rt', encoding=encoding) as stream:
        for raw_line in stream:
            records.update(json.loads(raw_line))
    return records
def _merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
See http://stackoverflow.com/a/26853961/2680824
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result | 04e4b2bf97cd7d38828cc47e5bcc25c9bb952a52 | 685,832 |
def timestamptz_to_unix(timestamptz):
    """
    Convert a timezone-aware timestamp to its Unix epoch value.
    """
    epoch_seconds = timestamptz.timestamp()
    return epoch_seconds
def calc_bpm(peaks, fs):
    """Calculate average HR from the R peaks found in an ECG strip.

    Args:
        peaks (ndarray): array of indices corresponding to location of R peaks
        fs (float): sampling frequency of ECG strip

    Returns:
        float: average heart rate in bpm
    """
    total_span = peaks[-1] - peaks[0]
    sec_per_beat = (total_span / len(peaks)) / fs
    return 60 / sec_per_beat
def inv_price_sigmoid(forecast_price, w_param, m_bet_size):
    """
    Part of SNIPPET 10.4: the inverse of the sigmoid bet-size function
    with respect to the market price.

    :param forecast_price: (float) Forecast price.
    :param w_param: (float) Coefficient regulating the width of the bet size function.
    :param m_bet_size: (float) Bet size.
    :return: (float) Inverse of bet size with respect to market price.
    """
    width_term = (w_param / (1 - m_bet_size ** 2)) ** 0.5
    return forecast_price - m_bet_size * width_term
from typing import Dict
def _sum_expectation_values(
    expectation_values_per_bitstring: Dict[str, float],
    probability_per_bitstring: Dict[str, float],
    alpha: float,
) -> float:
    """Returns the cumulative sum of expectation values until the cumulative probability of bitstrings
    s_k = p(x_1) + … + p(x_k) >= alpha

    Args:
        expectation_values_per_bitstring: dictionary of bitstrings and their corresponding expectation values.
        probability_per_bitstring: dictionary of bitstrings and their corresponding expectation probabilities.
        alpha: see description in the `__call__()` method.
            NOTE(review): used as the final divisor, so it presumably lies
            in (0, 1]; alpha == 0 would divide by zero — confirm at caller.

    Returns:
        The probability-weighted average of the smallest expectation values
        whose probabilities sum to ``alpha`` (a CVaR-style tail average —
        presumably; confirm against the calling class).
    """
    # Sorts expectation values by values.
    sorted_expectation_values_per_bitstring_list = sorted(
        expectation_values_per_bitstring.items(), key=lambda item: item[1]
    )
    cumulative_prob = 0.0
    cumulative_value = 0.0
    # Sums expectation values for each bitstring, starting from the one with the smallest one.
    # When the cumulative probability associated with these bitstrings is higher than alpha,
    # it stops and effectively discards all the remaining values.
    for bitstring, expectation_value in sorted_expectation_values_per_bitstring_list:
        prob = probability_per_bitstring[bitstring]
        if cumulative_prob + prob < alpha:
            cumulative_prob += prob
            cumulative_value += prob * expectation_value
        else:
            # Only the probability mass needed to reach alpha counts for
            # the final (boundary) bitstring.
            cumulative_value += (alpha - cumulative_prob) * expectation_value
            break
    final_value = cumulative_value / alpha
    return final_value
def is_auto_primary_key(primary_key: bool, autoincrement: bool) -> bool:
    """
    Check if a field is an autoincrement pk -> if yes it's optional.

    :param primary_key: flag if field is a pk field
    :type primary_key: bool
    :param autoincrement: flag if field should be autoincrement
    :type autoincrement: bool
    :return: result of the check
    :rtype: bool
    """
    return autoincrement if primary_key else primary_key
def create_primes(threshold):
    """
    Generate prime values using sieve of Eratosthenes method.

    Args:
        threshold (int):
            The upper bound for the size of the prime values.

    Returns (List[int]):
        All primes from 2 and up to ``threshold``.
    """
    if threshold == 2:
        return [2]
    elif threshold < 2:
        return []
    # Sieve over odd numbers only: slot i of ``numbers`` holds the odd
    # value 2*i + 3, and composites are marked by zeroing their slot.
    numbers = list(range(3, threshold+1, 2))
    root_of_threshold = threshold ** 0.5
    # Number of odd candidates; bounds the striking loop below.
    half = int((threshold+1)/2-1)
    idx = 0
    counter = 3  # the odd value represented by numbers[idx]
    while counter <= root_of_threshold:
        if numbers[idx]:
            # First composite to strike is counter**2 (smaller multiples
            # were already removed by smaller primes); then step by
            # ``counter`` slots, i.e. by 2*counter in value.
            idy = int((counter*counter-3)/2)
            numbers[idy] = 0
            while idy < half:
                numbers[idy] = 0
                idy += counter
        idx += 1
        counter = 2*idx+3
    # Unstruck slots are primes; prepend the only even prime.
    return [2] + [number for number in numbers if number]
import pathlib
import shutil
def prep_dir(dir_path, clobber=False):
    """Create (or delete and recreate) a directory.

    Args:
        dir_path (path-like): path to the directory that you are trying to
            clean and prepare.
        clobber (bool): If True and dir_path exists, it will be removed and
            replaced with a new, empty directory.

    Raises:
        FileExistsError: if a file or directory already exists at dir_path.

    Returns:
        pathlib.Path: Path to the created directory.
    """
    target = pathlib.Path(dir_path)
    if target.exists():
        if not clobber:
            raise FileExistsError(f"{target} exists and clobber is {clobber}")
        shutil.rmtree(target)
    target.mkdir(parents=True)
    return target
from typing import Tuple
from typing import Optional
def normalize_parameter(parameter: str, expression: str) -> Tuple[Optional[str], str, str]:
    """Normalize runtime expressions.

    Runtime expressions may have parameter names prefixed with their
    location - `path.id` - while other parameters are defined without a
    prefix - `id`. Normalize both forms to a (location, name, expression)
    triple where location is None when no prefix was present.
    """
    parts = parameter.split(".")
    if len(parts) == 2:
        # The parameter name is prefixed with its location. Example: `path.id`
        location, name = parts
        return location, name, expression
    return None, parameter, expression
from typing import TextIO
from typing import Tuple
from typing import Mapping
import toml
def parse_pyproject_toml(file: TextIO) -> Tuple[Mapping, Mapping]:
    """ Parse a pyproject.toml file

    This function assumes that the pyproject.toml contains a poetry and
    poetry2conda config sections.

    Parameters
    ----------
    file
        A file-like object containing a pyproject.toml file.

    Returns
    -------
    A tuple with the poetry2conda and poetry config.

    Raises
    ------
    RuntimeError
        When an expected configuration section is missing.
    """
    document = toml.loads(file.read())
    tool_section = document.get("tool", {})
    poetry_config = tool_section.get("poetry", {})
    if not poetry_config:
        raise RuntimeError(f"tool.poetry section was not found on {file.name}")
    poetry2conda_config = tool_section.get("poetry2conda", {})
    if not poetry2conda_config:
        raise RuntimeError(f"tool.poetry2conda section was not found on {file.name}")
    name = poetry2conda_config.get("name")
    if not isinstance(name, str):
        raise RuntimeError(f"tool.poetry2conda.name entry was not found on {file.name}")
    return poetry2conda_config, poetry_config
def stations_level_over_threshold(stations, tol):
    """Return (MonitoringStation, relative water level) tuples for every
    station whose relative water level is > tol, sorted by level,
    highest first.

    Stations reporting a relative level of None (invalid data) are skipped.
    """
    over_threshold = []
    for station in stations:
        # Call relative_water_level() once per station instead of twice.
        level = station.relative_water_level()
        if level is None:  # discounts all invalid data
            # Bug fix: the original evaluated the bare name ``next`` here,
            # which is a no-op expression; ``continue`` states the intent.
            continue
        if level > tol:
            over_threshold.append((station, level))
    # sort by 2nd element (relative water level), descending
    over_threshold.sort(key=lambda pair: pair[1], reverse=True)
    return over_threshold
def score4_evaluation_function(game_state, agent, **context):
    """
    This evaluation function comes from Score4 project:
    https://github.com/ttsiodras/Score4
    "Speaking of the scoreBoard function, I tried various forms to evaluate the
    board. I ended up on a simple policy: measuring how many chips of the same
    color exist, in spans of 4 going in any direction. I do this over each of
    the board's cells, and then aggregate this in a table keeping the
    aggregates from -4 to 4:
    -4 means that the cell is a part of 4 cells that contain 4 yellow chips
    -3 means that the cell is a part of 4 cells that contain 3 yellow chips
    ...
    3 means that the cell is a part of 4 cells that contain 3 orange chips
    4 means that the cell is a part of 4 cells that contain 4 orange chips
    If 4 is found, the board is a win for the Orange player, and the function
    returns orangeWins (i.e. 1000000). If -4 is found, the board is a win for
    the Yellow player, and the function returns yellowWins (i.e. -1000000).
    Otherwise, scaling factors are applied, so that the more '3'-cells found,
    the more positive the board's score. Correspondingly, the more '-3' found,
    the more negative the board's score."
    """
    agent_index = agent.get_index()
    # counters[s + 4] counts length-4 windows whose net score is s,
    # where s = (#agent tokens) - (#opponent tokens) in the window.
    counters = [0] * 9
    board = game_state.get_board()
    # Horizontal spans: slide a 4-wide window along each row, adding the
    # entering cell and removing the cell that left the window.
    for r in range(board.height()):
        score = 0
        for k in range(3):
            if board.has_token(k, r):
                if board.get_token(k, r) == agent_index:
                    score += 1
                else:
                    score -= 1
        for c in range(3, board.width()):
            if board.has_token(c, r):
                if board.get_token(c, r) == agent_index:
                    score += 1
                else:
                    score -= 1
            # NOTE(review): the int() wrapper appears only in this loop;
            # the vertical/diagonal loops index with the bare score.
            counters[int(score) + 4] += 1
            if board.has_token(c-3, r):
                if board.get_token(c-3, r) == agent_index:
                    score -= 1
                else:
                    score += 1
    # Vertical spans: same sliding-window scheme along each column.
    for c in range(board.width()):
        score = 0
        for k in range(3):
            if board.has_token(c, k):
                if board.get_token(c, k) == agent_index:
                    score += 1
                else:
                    score -= 1
        for r in range(3, board.height()):
            if board.has_token(c, r):
                if board.get_token(c, r) == agent_index:
                    score += 1
                else:
                    score -= 1
            counters[score + 4] += 1
            if board.has_token(c, r-3):
                if board.get_token(c, r-3) == agent_index:
                    score -= 1
                else:
                    score += 1
    # Down-right (and up-left) diagonals: score each 4-cell diagonal window
    # from scratch (no sliding here).
    for r in range(board.height()-3):
        for c in range(board.width()-3):
            score = 0
            for k in range(4):
                rr = r + k
                cc = c + k
                if board.has_token(cc, rr):
                    if board.get_token(cc, rr) == agent_index:
                        score += 1
                    else:
                        score -= 1
            counters[score + 4] += 1
    # up-right (and down-left) diagonals
    for r in range(3,board.height()):
        for c in range(board.width()-3):
            score = 0
            for k in range(4):
                rr = r - k
                cc = c + k
                if board.has_token(cc, rr):
                    if board.get_token(cc, rr) == agent_index:
                        score += 1
                    else:
                        score -= 1
            counters[score + 4] += 1
    max_score = 1000000
    score = 0.0
    if counters[0] != 0:
        # A window with 4 opponent tokens exists -> lost position.
        score = -max_score
    elif counters[8] != 0:
        # A window with 4 agent tokens exists -> won position.
        score = max_score
    else:
        # Weighted tally of near-complete windows (3 > 2 > 1 tokens).
        score = (counters[5] + 2*counters[6] + 5*counters[7] - counters[3] - 2*counters[2] - 5*counters[1])
        # NOTE(review): dividing by max_score squashes non-terminal scores
        # into (-1, 1) while wins stay at +/-1000000 — confirm intended.
        score /= max_score
    # NOTE(review): debug print left in — this runs on every evaluation and
    # will flood stdout during search; consider removing or using logging.
    print("Agent: ", agent_index, ", State: ", game_state, ", Counter: ", counters, " => Score: ", score)
    return score
def check_valid(subset_json):
    """Helper to build a validity predicate from a subset json instance.

    Args:
        subset_json: Defined subset json file data, or None for "accept all".

    Returns:
        callable: A one-argument predicate taking an image path and
        returning True when the path is allowed (always True when
        subset_json is None, membership in subset_json otherwise).
    """
    if subset_json is None:
        return lambda path: True
    # Membership test replaces the original if/return-True/return-False chain.
    return lambda image_path: image_path in subset_json
return curry | 5a0c08bd60f134c2aef1e02239f38716607ae958 | 685,867 |
def irc_prefix(var):
    """
    Prefix a lowercased string with ``irc_``.

    :param var: Variable to prefix
    :return: Prefixed lowercase string, or ``None`` when *var* is not a str
             (preserves the original implicit-None behavior)
    """
    if not isinstance(var, str):
        return None
    return 'irc_{}'.format(var.lower())
def get_data_files(filepath, prefix, num_epochs, num_features=41,
                   model_type='lstm'):
    """Build the model, means and stddev file paths for a trained model.

    Layout conventions:
        model folder:  type_prefix_features
        model file:    type_prefix_features_epochs.model
        means/stddev:  means/stddev_prefix_numfeatures.npy

    :return: tuple of (model_file, means_file, stddevs_file) paths
    """
    epochs_str = str(num_epochs)
    features_str = str(num_features)
    model_name = '_'.join([model_type, prefix, features_str])
    model_dir = filepath + model_name + "/"
    model_file = model_dir + model_name + '_' + epochs_str + ".model"
    means_file = model_dir + '_'.join(["means", prefix, features_str]) + ".npy"
    stddevs_file = model_dir + '_'.join(["stddev", prefix, features_str]) + ".npy"
    return model_file, means_file, stddevs_file
def success_email_subject_msid_author(identity, msid, author):
    """email subject for a success email with msid and author values

    The msid is zero-padded on the left to five characters.
    """
    return f"{identity}JATS posted for article {str(msid):0>5}, author {author}"
def add_match_to_profile(profile, match, ismap=True, nucl=None):
    """Merge current read-gene matches to master profile.

    Parameters
    ----------
    profile : dict
        Master gene profile (mutated in place).
    match : dict
        Read-gene matches.
    ismap : bool, optional
        Whether matches are a read-to-gene(s) map or simple counts.
    nucl : str, optional
        Prefix nucleotide Id to gene Ids.

    See Also
    --------
    match_read_gene
    """
    def qualify(gene_id):
        # Prepend the nucleotide Id when one is given.
        return '{}_{}'.format(nucl, gene_id) if nucl else gene_id

    if ismap:
        # match maps read index -> iterable of gene Ids
        for read_idx, gene_ids in match.items():
            for gene_id in gene_ids:
                profile.setdefault(qualify(gene_id), []).append(read_idx)
    else:
        # match maps gene Id -> count
        for gene_id, count in match.items():
            key = qualify(gene_id)
            profile[key] = profile.get(key, 0) + count
def wildcard_filter(input_string):
    """
    Map the wildcard value 'ANY' (after str() conversion and whitespace
    stripping) to 'N/A'; pass any other value through unchanged.
    """
    return 'N/A' if str(input_string).strip() == 'ANY' else input_string
def array(num_elements, element_func, *element_func_args):
    """
    Return a list of length num_elements where every element comes from a
    fresh call to element_func(*element_func_args).
    """
    elements = []
    for _ in range(num_elements):
        elements.append(element_func(*element_func_args))
    return elements
def deg_dms(decimal_degree):
    """
    Convert an angle in decimal degrees to a (degree, minute, second) tuple.

    :param decimal_degree: Angle in decimal degrees
    :return: (degree, minutes, seconds) as integers; fractional seconds are
             truncated, not rounded

    Example:
        >>> import units as u
        >>>
        >>> u.deg_dms(45.392778)
        (45, 23, 34)
    """
    whole_degrees = int(decimal_degree)            # integer degrees
    remaining_minutes = 60 * (decimal_degree - whole_degrees)
    whole_minutes = int(remaining_minutes)         # integer minutes
    whole_seconds = int(60 * (remaining_minutes - whole_minutes))
    return (whole_degrees, whole_minutes, whole_seconds)
def _last_test(test):
"""Returns True if given test is the last one."""
parent_tests = tuple(test.parent.testcases)
return parent_tests.index(test) == (len(parent_tests) - 1) | 075ec9fb00d880265a081b942396ac61bcf2bfcb | 685,892 |
def required_jenkins_settings(settings):
    """ Checks if all settings required for interacting with jenkins build are set

    :param settings: config-style object supporting get(section, option)
    :return: True when every required option in the 'jenkins' section is
             non-empty; False on an empty value or a KeyError lookup failure
    """
    required = ('url', 'username', 'password', 'job_name', 'build_num')
    try:
        for option in required:
            if settings.get('jenkins', option) == '':
                return False
        return True
    except KeyError:
        return False
import requests
def _return_response_and_status_code(response, json_results=True):
    """ Output the requests response content or content as json and status code

    :rtype : dict
    :param response: requests response object
    :param json_results: Should return JSON or raw content
    :return: dict containing the response content and/or the status code with error string.
    """
    status = response.status_code
    if status == requests.codes.ok:
        payload = response.json() if json_results else response.content
        return dict(results=payload, response_code=status)
    if status == 400:
        return dict(error='package sent is malformed.', response_code=status)
    if status == 404:
        return dict(error='Requested URL not found.', response_code=status)
    return dict(response_code=status)
def prune_wv(df, vocab, extra=("UUUNKKK",)):
    """Prune word vectors to vocabulary.

    :param df: DataFrame of word vectors, indexed by word.
    :param vocab: iterable of words to keep.
    :param extra: extra index entries to always keep (default: the
        unknown-word token). Changed from a mutable list default to a
        tuple — behavior is identical, but a mutable default argument is
        a well-known Python pitfall.
    :return: DataFrame restricted to rows whose index is in vocab or extra.
    """
    keep = set(vocab).union(extra)
    return df.filter(items=keep, axis='index')
def addressInNetwork(ip, net):
    """
    Is an address in a network

    :param ip: integer form of the address (must support bitwise AND)
    :param net: integer value the address is tested against
    :return: True when every bit set in ``net`` is also set in ``ip``

    NOTE(review): this tests ``ip & net == net``, i.e. that ``net``'s bits
    are a subset of ``ip``'s bits. A conventional network-membership test
    compares the masked address against the network number
    (``ip & mask == network``). Confirm with callers that this subset
    semantics is really what is intended here.
    """
    return ip & net == net
import torch
def get_all_pairs_indices(labels, ref_labels=None):
    """
    Given a tensor of labels, return 4 index tensors.
    The first 2 tensors are the indices which form all positive
    (same-label) pairs; the second 2 are the indices which form all
    negative (different-label) pairs.
    """
    if ref_labels is None:
        ref_labels = labels
    # Broadcast to an (n, m) equality matrix.
    same_label = (labels.unsqueeze(1) == ref_labels.unsqueeze(0)).byte()
    # Negatives are computed BEFORE the diagonal is cleared, so an element
    # is never its own negative either.
    diff_label = same_label ^ 1
    if ref_labels is labels:
        # An anchor is not its own positive.
        same_label.fill_diagonal_(0)
    anchor_pos, positive = torch.where(same_label)
    anchor_neg, negative = torch.where(diff_label)
    return anchor_pos, positive, anchor_neg, negative
from typing import Set
def _negative_to_positive_state_indexes(indexes: Set[int], n_entries) -> Set[int]:
""" Convert negative indexes of an iterable to positive ones
Parameters
----------
indexes: Set[int]
indexes to check and convert
n_entries: int
total number of entries
Returns
-------
new_entries: Set[int]
the positive indexes
"""
new_entries: Set[int] = set()
for _, index in enumerate(indexes):
new_index = index + n_entries if index < 0 else index
if new_index >= n_entries:
err_msg = "State '{0}' exceeds the maximum number of states of '{1}'"
raise ValueError(err_msg.format(index, n_entries))
new_entries.add(new_index)
return new_entries | 65a66766a1eef881393ee5b89d6785f0ebcab6a5 | 685,916 |
import math
def distance(point_one, point_two):
    """Calculates the Euclidean distance from point_one to point_two.

    Points are indexable pairs of (x, y) coordinates.
    """
    delta_x = point_two[0] - point_one[0]
    delta_y = point_two[1] - point_one[1]
    return math.sqrt(delta_x ** 2 + delta_y ** 2)
def matches_have_unknown(matches, licensing):
    """
    Return True if any of the LicenseMatch in `matches` has an unknown
    license, False otherwise.

    The original fell off the end and implicitly returned None on the
    negative path; an explicit False keeps the documented bool contract
    (None is falsy, so callers are unaffected).
    """
    unknown_keys = ('unknown', 'unknown-spdx')
    for match in matches:
        expression = match.rule.license_expression_object
        if any(key in unknown_keys for key in licensing.license_keys(expression)):
            return True
    return False
def mvt(a, b, fx=lambda x: x):
    """
    Mean value theorem: average rate of change of fx over [a, b].

    Params:
        a: start of interval
        b: end of interval
        fx: function (defaults to the identity)
    Returns:
        f_c: the slope (fx(b) - fx(a)) / (b - a), which by the MVT equals
        the derivative of fx at some c with a <= c <= b
    """
    rise = fx(b) - fx(a)
    run = b - a
    return rise / run
def add_prefix(prefix, split, string):
    """
    Adds a prefix to the given string

    :param prefix: str, prefix to add to the string
    :param split: str, split character placed between prefix and string
    :param string: str, string to add prefix to
    :return: str
    """
    return prefix + split + string
import pathlib
def md_files(file_list):
    """Get list of markdown files from the repository file list.

    Note: consumes (empties) ``file_list`` via pop(), so the returned
    matches appear in reverse of the input order.
    """
    markdown = []
    while file_list:
        entry = file_list.pop()
        if pathlib.Path(entry.name).suffix == ".md":
            markdown.append(entry)
    return markdown
import math
def next_byte_power(value):
    """Calculate the next power of 2 from a value, rounded up to a whole
    number of bytes (i.e. 2 ** (8 * ceil(bit_length / 8)))."""
    char_bit = 8
    n_bytes = int(math.ceil(value.bit_length() / char_bit))
    # Shift is equivalent to 2 ** (char_bit * n_bytes) for this exponent.
    return 1 << (char_bit * n_bytes)
def serialize_structures(storage):
    """
    Serializes storage structures into dict.

    :param dict storage: Storage dict with "tasks" and "groups" lists of
        namedtuple-like records (supporting _asdict).
    :return: Serialized storage with each record flattened to a list of
        its field values.
    :rtype: dict
    """
    def as_rows(records):
        # Flatten each namedtuple record to an ordered list of its values.
        return [list(record._asdict().values()) for record in records]

    return {
        "tasks": as_rows(storage["tasks"]),
        "groups": as_rows(storage["groups"]),
    }
def apply_twice(f, x):
    """Return f(f(x))

    >>> apply_twice(square, 2)
    16
    >>> from math import sqrt
    >>> apply_twice(sqrt, 16)
    2.0
    """
    once = f(x)
    return f(once)
def extract_wikipedia_page(line):
    """Extracts the English Wikipedia page title for an entity.

    Returns the 'enwiki' sitelink title with spaces replaced by
    underscores, or None when no such sitelink exists.
    """
    if "sitelinks" not in line or "enwiki" not in line["sitelinks"]:
        return None
    title = line["sitelinks"]["enwiki"]["title"]
    return title.strip().replace(" ", "_")
def _get_port_interface_id_index(dbapi, host):
"""
Builds a dictionary of ports indexed by interface id.
"""
ports = {}
for port in dbapi.ethernet_port_get_by_host(host.id):
ports[port.interface_id] = port
return ports | 048844dda9e7069e195f6c6f8818c9b0d655bb08 | 685,940 |
import random
def random_correct_answer_message(correct_answer, points):
    """ Return a random encouraging phrase for getting a right answer

    :param correct_answer: the word that was answered correctly
    :param points: points awarded, interpolated into the message
    :return: SSML string announcing the answer and score
    """
    phrases = [
        "Nailed it!",
        "Nice one!",
        "Great work!",
        "You got it!",
        "Woohoo, nice job!",
        "You're amazing!",
        "Crushed it!"
    ]
    exclamations = [
        "Booyah!",
        "Bling, bling!",
        "Yowza!",
        "Hooray!",
        "Huzzah!"
    ]
    # random.choice is the idiomatic equivalent of indexing with
    # random.randint(0, len(seq) - 1).
    phrase = random.choice(phrases)
    exclamation = random.choice(exclamations)
    return """
    <amazon:emotion name="excited" intensity="high">
    {}
    The word was {}.
    {}
    You got {} points!
    </amazon:emotion>
    """.format(phrase, correct_answer, exclamation, points)
def is_protein_family(bio_ontology, node):
    """Return True if the given ontology node is a protein family
    (i.e. its namespace is FPLX), False otherwise."""
    return bio_ontology.get_ns(node) == 'FPLX'
import re
# Matches the zero-width position just before each uppercase letter,
# except at the very start of the string.
_CAMEL_BOUNDARY = re.compile(r'(?<!^)(?=[A-Z])')
def camel_to_snake(s: str) -> str:
    """Convert a string from camelCase string to snake_case.

    Args:
        s: String to be converted.

    Returns:
        A string where camelCase words have been converted to snake_case.
    """
    return _CAMEL_BOUNDARY.sub('_', s).lower()
def inputNumber(message):
    """ Get an input number from user. Prompt is str @message

    Re-prompts until the user enters text that parses as an int.
    (The original had an unreachable `break` after the `return`; the
    `continue` was also redundant with try/except/else — both removed.)
    """
    while True:
        try:
            userInput = int(input(message))
        except ValueError:
            print("Not an integer! Try again.")
        else:
            return userInput
def d3(x):
    """Evaluate the estimate 3*x**2+2*x+1 (Horner form)."""
    return x * (3 * x + 2) + 1
def prompt_output(cli_input, converted=None):
    """Return expected output of simple_command, given a commandline cli_input string.

    A falsy `converted` (None, empty string, ...) falls back to cli_input.
    """
    shown = converted or cli_input
    return 'Opt: {}\n{}\n'.format(cli_input, shown)
def only_for_board_and_development(boards):
    """
    Create a filter that is only considered when the given board matches and
    when development is toggled.
    """
    def board_and_dev_filter(context):
        # Like the original `and`, this returns context["development"]
        # itself (not a coerced bool) when the board matches.
        if context["board"] not in boards:
            return False
        return context["development"]
    return board_and_dev_filter
# Lookup table hoisted to module level so it is not rebuilt on every call.
_ENVIRONMENT_ID_OPTIONS = {
    "300: Linux Ubuntu 16.04": 300,
    "200: Android (static analysis)": 200,
    "160: Windows 10": 160,
    "110: Windows 7": 110,
    "100: Windows 7": 100,
    "64-bit": 64,
    "32-bit": 32,
}


def convert_environment_id_string_to_int(
    environment_id: str
) -> int:
    """
    Converting the string that describes the environment id into an int which needed for the http request

    :param environment_id: one of the environment_id options
    :return: environment_id represented by an int
    :raises Exception: when environment_id is not a known option
    """
    try:
        return _ENVIRONMENT_ID_OPTIONS[environment_id]
    except KeyError:
        # Narrowed from a blanket `except Exception` — only a failed
        # lookup should produce this message.
        raise Exception('Invalid environment id option')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.