content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def lower_column(col):
    """Trace a selectable's column back to its lowest-level table column.

    Repeatedly follows ``corresponding_column`` through the column's
    table ``froms`` until a fixed point is reached, or until the current
    column no longer exposes a ``table``/``froms`` pair.
    """
    previous = None
    while col is not None and col is not previous:
        previous = col
        table = getattr(col, 'table', None)
        if table is None or not hasattr(table, 'froms'):
            return col
        for frm in table.froms:
            lowered = frm.corresponding_column(col)
            if lowered is not None:
                col = lowered
    return previous
def tuple_replace(tup, *pairs):
    """Return a copy of a tuple with some elements replaced.

    :param tup: The tuple to be copied.
    :param pairs: Any number of (index, value) tuples; each ``index`` in the
        copy is overwritten with the paired ``value``.
    """
    items = list(tup)
    for position, replacement in pairs:
        items[position] = replacement
    return tuple(items)
import re
def separate_words(name):
    """Insert a space before each capital letter of a CamelCase name."""
    return re.sub(r"(.)([A-Z])", lambda m: m.group(1) + " " + m.group(2), name)
def first_is_not(l, v):
    """Return the first item of *l* that is not *v* (compared by identity).

    Falls back to *v* itself when every item is *v* or *l* is empty.

    >>> first_is_not(['a', 'b', 'c'], 'a')
    'b'
    """
    for item in l:
        if item is not v:
            return item
    return v
def default_str(str_, default_str):
    """Return *str_* unless it is ``None`` or empty, else *default_str*.

    Args:
        str_: A string.
        default_str: The fallback string.

    Returns:
        Either *str_* or *default_str*.
    """
    return default_str if str_ is None or str_ == "" else str_
def _get_morphometry_data_suffix_for_surface(surf):
"""
Determine FreeSurfer surface representation string.
Determine the substring representing the given surface in a FreeSurfer output curv file. For FreeSurfer's default surface 'white', the surface is not represented in the output file name pattern. For all others, it is represented by a dot followed by the name.
Parameters
----------
surf: string
A string representing a FreeSurfer surface, e.g., 'white' or 'pial'.
Returns
-------
string
The empty string if `surf` is 'white'. A dot followed by the string in the input argument `surf` otherwise.
Examples
--------
>>> import brainload.freesurferdata as fsd
>>> print fsd._get_morphometry_data_suffix_for_surface('pial')
.pial
"""
if surf == 'white':
return ''
return '.' + surf | fcc9a4611bd056a7159d88e5b8706efa40e2ab67 | 687,688 |
import math
def format_duration(seconds: float) -> str:
    """Render a duration in seconds as 'Ns', 'MmNs' or 'HhMmNs'."""
    if seconds < 60:
        return f'{seconds:.0f}s'
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return f'{int(hours)}h{int(minutes)}m{secs:.0f}s'
    return f'{int(minutes)}m{secs:.0f}s'
def save_as(user):
    """
    Build a callback that saves a model instance as the given *user*.

    :param user: the user forwarded to ``instance.save``
    :return: a one-argument function performing the save
    """
    def _save(instance):
        instance.save(user)
    return _save
def get_test_count(test_data):
    """
    Args:
        test_data: parsed test-report JSON with a
            ``testsuites.testsuite.@tests`` entry
    Returns:
        int: total test count
    """
    suite = test_data.get("testsuites").get("testsuite")
    return int(suite.get("@tests"))
def map_diameter(c: int) -> float:
    """Compute the map diameter ``(1/3)(c + 1)(c - 1)``."""
    one_third = 1 / 3
    return one_third * (c + 1) * (c - 1)
def label_set_match(object_labels, returned_labels):
    """True iff any object label occurs (case-insensitively) as a substring
    of any returned label."""
    lowered_returns = [r.lower() for r in returned_labels]
    return any(o.lower() in r for o in object_labels for r in lowered_returns)
def chain_value(row, attribute_ids):
    """
    Join the row values at the (sorted) attribute indices into one identifier.

    :param row: a table row, e.g. a list of attribute values
    :param attribute_ids: a set of attribute indices
    :return: '-'-joined identifier string
    """
    return '-'.join(row[i] for i in sorted(attribute_ids))
def shared_data_volume_container_path(sdv, sdvkey):
    # type: (dict, str) -> str
    """Get the container path configured for a shared data volume.

    :param dict sdv: shared_data_volume configuration object
    :param str sdvkey: key into *sdv*
    :rtype: str
    :return: container path
    """
    entry = sdv[sdvkey]
    return entry['container_path']
def isYes(string):
    """Map a textual flag to True, False, 'change', or (implicitly) None.

    Yes-words give True, no-words give False, change-words give the string
    'change'; anything else falls through and returns None.
    """
    token = string.strip().lower()
    yes_words = ('yes', 'always', 'on', 'true')
    no_words = ('no', 'never', 'off', 'false', 'null')
    change_words = ('changed', 'change', 'onchange', 'on_change', 'diff')
    if token in yes_words:
        return True
    if token in no_words:
        return False
    if token in change_words:
        return 'change'
def _add_new_line_if_none(s: str):
"""Since graphviz 0.18, need to have a newline in body lines.
This util is there to address that, adding newlines to body lines
when missing."""
if s and s[-1] != "\n":
return s + "\n"
return s | aed3582be53cf403601a125cebc436041257a0d9 | 687,716 |
def build_from_cfg(name, cfg, registry, default_args=None):
    """Resolve *name* in *registry* and instantiate it with *cfg*.

    Args:
        name (str): Name of the object type to build.
        cfg (addict): Config dict handed to the constructor.
        registry (:obj:`Registry`): Registry to search the type in.
        default_args (dict, optional): Defaults merged into *cfg*.

    Returns:
        obj: The constructed object.

    Raises:
        KeyError: If *name* is not registered.
    """
    assert default_args is None or isinstance(default_args, dict)
    obj_cls = registry.get(name)
    if obj_cls is None:
        raise KeyError(f'{name} is not in the {registry.name} registry. '
                       f'Choose among {list(registry.obj_dict.keys())}')
    for key, value in (default_args or {}).items():
        cfg.setdefault(key, value)
    return obj_cls(cfg)
import glob
def getFeatures(dirSrc, extFilesSrc, lsGenres):
    """Read per-genre sample files and return a (X, y) pair.

    Source files are expected to be named <genre>*.<extFilesSrc> and hold
    one float per line; each file becomes one feature vector labelled with
    its genre.

    dirSrc: absolute path to the directory of source text files
    extFilesSrc: file extension of the source files (e.g. "txt")
    lsGenres: list of target genres
    returns: (X, y) where X is the feature matrix and y the target vector
    """
    # Normalise the path to forward slashes with a trailing separator.
    root = dirSrc.replace('\\', '/')
    if root[-1] != '/':
        root += '/'
    features = []
    targets = []
    for genre in lsGenres:
        for path in glob.glob(root + genre + '*.' + extFilesSrc):
            with open(path) as handle:
                features.append([float(line) for line in handle])
            targets.append(genre)
    return (features, targets)
import torch
def truncated_normal(size, std):
    """
    Sample a tensor of shape ``size`` from a zero-mean normal distribution
    truncated to (-2, 2), then scaled by ``std``.

    PyTorch lacks a truncated-normal initializer, so this is done manually
    to cut the dependency on TensorFlow.
    Modified Version of: https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/20

    NOTE(review): assumes ``size`` is a tuple (it is concatenated with
    ``(4,)`` below) — an int argument would raise.
    """
    mean = 0
    tensor = torch.zeros(size)
    # Draw 4 candidate samples per output element; with high probability at
    # least one falls inside the truncation window (-2, 2).
    tmp = tensor.new_empty(size + (4,)).normal_()
    valid = (tmp < 2) & (tmp > -2)
    # Index of a valid candidate along the extra trailing dimension
    # (max over a bool tensor picks the first maximal entry's index).
    ind = valid.max(-1, keepdim=True)[1]
    # Keep one candidate per element, then scale/shift to (mean, std).
    tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
    return tensor
def compare_equal(compare_data):  # pylint: disable=redefined-outer-name
    """
    Build a checker that compares given data against the stored reference
    for equality, delegating the actual comparison to *compare_data*.
    """
    def _check(data, tag=None):
        return compare_data(lambda x, y: x == y, data, tag)
    return _check
def type_validator(property_type):
    """Create a validator that accepts only values of a specific type.

    Args:
        property_type: The required type.

    Returns:
        A function returning True for values of *property_type* and raising
        TypeError for anything else.
    """
    def _validate(value):
        if isinstance(value, property_type):
            return True
        raise TypeError('Property must be type %s' % property_type.__name__)
    return _validate
def prod(lst):
    """Product of list elements (0 for an empty list, per the original contract)."""
    if not lst:
        return 0
    it = iter(lst)
    result = next(it)
    for value in it:
        result *= value
    return result
def get_agent_location_from_maze_string(mazeString=str):
    """[Locate the agent marker 'o' in a maze string]

    Args:
        mazeString ([str], optional): newline-separated maze rows.

    Returns:
        [tuple]: (row, col) of the agent, or (-1, -1) when absent.
    """
    grid = [list(line.strip()) for line in mazeString.split("\n") if line]
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            if cell == 'o':
                return row_idx, col_idx
    return -1, -1
def decompose_path(path):
    """
    Break a '/'-separated path into its non-empty parts.

    Parameters
    ----------
    path : string
        Path to the variable.

    Returns
    -------
    structure : tuple of strings
        The individual path components.
    """
    parts = path.split('/')
    return tuple(part for part in parts if part != '')
from typing import OrderedDict
def groupby(items, by):
    """
    Group *items* by the key produced by *by*, preserving first-seen order.

    :param items:
        The items to group.
    :param by:
        A callable deriving the group key from an item.
    :return:
        An OrderedDict mapping key -> list of items.
    """
    grouped = OrderedDict()
    for element in items:
        grouped.setdefault(by(element), []).append(element)
    return grouped
def check_textbound_overlap(anns):
    """
    Checks for overlap between the given TextBoundAnnotations.
    Returns a list of overlapping pairs; note that both orderings of an
    overlapping pair are reported, matching the all-vs-all scan.
    """
    return [
        (first, second)
        for first in anns
        for second in anns
        if first is not second
        and second.start < first.end
        and second.end > first.start
    ]
def det3x3(matrix):
    """
    Determinant of a 3x3 matrix via explicit cofactor expansion.

    Usually `numpy.linalg.det` should be preferred, but this works for any
    object with 2-d indexing, including matrices with uncertainties.
    The entries are read column-major; since det(A) == det(A^T) the result
    is unchanged.

    :param matrix: 3x3 array/matrix that allows 2-d indexing
    :type matrix: numpy.ndarray or uncertainties.unumpy.matrix
    :return: Determinant of the matrix
    :rtype: int or float or uncertainties.core.Variable
    """
    a, b, c = matrix[0, 0], matrix[1, 0], matrix[2, 0]
    d, e, f = matrix[0, 1], matrix[1, 1], matrix[2, 1]
    g, h, i = matrix[0, 2], matrix[1, 2], matrix[2, 2]
    return (a * e * i + b * f * g + c * d * h
            - c * e * g - b * d * i - a * f * h)
def get_counts(filename):
    """
    Read a .counts_edit file and return the integer counts it contains.

    Taxa-name lines (starting with '>') and unknown counts ('x'/'X') are
    skipped.

    :param filename: counts file (original or simulated)
    :return: list of counts
    """
    counts = []
    with open(filename, "r") as handle:
        for raw in handle:
            entry = raw.strip()
            if entry.startswith('>') or entry in ("x", "X"):
                continue
            counts.append(int(entry))
    return counts
def _get_human_bytes(num_bytes):
"""Return the given bytes as a human friendly KB, MB, GB, or TB string
thanks https://stackoverflow.com/questions/12523586/python-format-size-application-converting-b-to-kb-mb-gb-tb
"""
num_bytes = float(num_bytes)
one_kb = float(1024)
one_mb = float(one_kb ** 2) # 1,048,576
one_gb = float(one_kb ** 3) # 1,073,741,824
one_tb = float(one_kb ** 4) # 1,099,511,627,776
if num_bytes < one_kb:
return '{0} {1}'.format(bytes, 'Bytes' if 0 == num_bytes > 1 else 'Byte')
elif one_kb <= num_bytes < one_mb:
return '{0:.2f} KB'.format(num_bytes / one_kb)
elif one_mb <= num_bytes < one_gb:
return '{0:.2f} MB'.format(num_bytes / one_mb)
elif one_gb <= num_bytes < one_tb:
return '{0:.2f} GB'.format(num_bytes / one_gb)
else:
return '{0:.2f} TB'.format(num_bytes / one_tb) | b48a74b619734df839db752a247d5e6fcd6da05e | 687,745 |
import re
def get_brute(brute_file, mini=1, maxi=63, banned='[^a-z0-9_-]'):
    """
    Generate a deduplicated list of brute-force words from a wordlist file.

    Each name is lowercased, stripped of characters matching *banned*, and
    kept only when its length lies in [mini, maxi].
    """
    with open(brute_file, encoding="utf8", errors="ignore") as handle:
        raw_names = handle.read().splitlines()
    strip_banned = re.compile(banned)
    cleaned = []
    for raw in raw_names:
        candidate = strip_banned.sub('', raw.lower())
        if mini <= len(candidate) <= maxi and candidate not in cleaned:
            cleaned.append(candidate)
    return cleaned
def get_post_from_row(post_row):
    """
    Extract the post body from a post row.

    :param post_row: BeautifulSoup tag for a single post row (a table row)
    :return: the first ``div.content`` tag inside the row's first ``td``,
        or None when no such div exists; raises AttributeError when the row
        has no ``td`` at all (``find`` returns None)
    """
    return post_row.find("td").find("div", class_="content")
def get_status(query_type, command, execution_status, status_dict):
    """Look up the status code of a command for a given query.

    Args:
        query_type (str): Type of status query, chunk in this case
        command (str): Command name
        execution_status: State of the last given command execution
        status_dict (:obj: `dict`): query type -> command -> state -> status

    Return:
        str: the resolved status
    """
    per_command = status_dict[query_type][command]
    return per_command[execution_status]
def get_all_individuals_to_be_vaccinated_by_area(
    vaccinated_compartments, non_vaccination_state, virus_states, area
):
    """Build the '+'-joined sum of species names to be vaccinated in *area*.

    Parameters
    ----------
    vaccinated_compartments : list of strings
        Compartments from which individuals are vaccinated.
    non_vaccination_state : str
        Name of the state indicating non-vaccinated individuals.
    virus_states : list of strings
        Names of the virus types.
    area : str
        Name of the area.

    Returns
    -------
    vaccinated_individuals : str
        Concatenation of '+<species>' terms: one per non-susceptible
        compartment/virus combination, plus one susceptible term.
    """
    terms = []
    for compartment in vaccinated_compartments:
        if compartment == "susceptible":
            continue
        for virus in virus_states:
            terms.append(f"{compartment}_{area}_{non_vaccination_state}_{virus}")
    terms.append(f"susceptible_{area}_{non_vaccination_state}")
    return "".join("+" + term for term in terms)
def get_lt(hdr):
    """Obtain the LTV and LTM keyword values from an extension header.

    LTM keywords are the diagonal elements of the MWCS linear transformation
    matrix; LTVs are the MWCS linear transformation vector (1-indexed pixel
    coordinates, returned exactly as read from the header).

    .. note:: Translated from ``calacs/lib/getlt.c``.

    Parameters
    ----------
    hdr : obj
        Extension header.

    Returns
    -------
    ltm, ltv : tuple of float
        ``(LTM1_1, LTM2_2)`` and ``(LTV1, LTV2)``.
        Values default to ``(1, 1)`` and ``(0, 0)`` when not found,
        to accommodate reference files with missing info.

    Raises
    ------
    ValueError
        Invalid (non-positive) LTM* values.
    """
    ltm = (hdr.get('LTM1_1', 1.0), hdr.get('LTM2_2', 1.0))
    if min(ltm) <= 0:
        raise ValueError(f'(LTM1_1, LTM2_2) = {ltm} is invalid')
    ltv = (hdr.get('LTV1', 0.0), hdr.get('LTV2', 0.0))
    return ltm, ltv
import pathlib
def data_file(module, *comps):
    """Return a Path to a file in the app's data directory.

    The location is resolved relative to *module*:
    ``<module dir>/../data/<comps...>``.
    """
    base = pathlib.Path(module.__file__).parent
    return base.joinpath('..', 'data', *comps)
def depend_on_proj_props(target, source, env):
    """SCons emitter: add the project properties file as a source dependency."""
    prop_file = env['XISE_PY_PROPFILE']
    return (target, source + [prop_file])
from bs4 import BeautifulSoup
from typing import List
from typing import Dict
import re
def parse_row(row: BeautifulSoup, columns: List[str], table_id: str) -> Dict[str, str]:
    """Takes a BeautifulSoup tag corresponding to a single row in an HTML table as input,
    along with an ordered list of normalized column names.
    Labels data in each row according to the position of the column names.
    Returns a dict of labeled data, suitable for transformation into ndjson
    """
    def extract_appt_slot_count(appt_slots: str) -> str:
        # Pull the leading integer out of an "<N> slots" string; "0" if absent.
        pattern = re.compile(r"(\d+) slots")
        match = re.search(pattern, appt_slots)
        return "0" if match is None else match.group(1)
    # One entry per <td>: the tag's list of child nodes.
    data = [td.contents for td in row.find_all("td")]
    # BUG FIX: the original message was not an f-string, so {data}/{columns}
    # were never interpolated into the assertion text.
    assert len(data) >= len(
        columns
    ), f"Failed to parse row, column and field mismatch! {data}, {columns}"
    result: Dict[str, str] = {}
    for key, value in zip(columns, data):
        if key == "clinic":
            # Speculatively assign the address field from the clinic name. At least one
            # store has a blank address field but contains the address in the clinic name
            try:
                clinic, _, address = tuple(value)
                result["clinic"] = clinic
                result["address"] = address
            except ValueError:
                # Not every store contains the address in the clinic name
                result["clinic"] = value[0]
        # BUG FIX: this was a bare `if`, so "clinic" cells also fell into the
        # trailing `else` and redundantly re-assigned result["clinic"] to the
        # same first element.
        elif key == "slots":
            result[key] = extract_appt_slot_count(str(value[0]))
        else:
            if len(value) != 0:
                result[key] = value[0]
    result["row_id"] = row.attrs["data-row_id"]
    result["table_id"] = table_id
    return result
def ses(count, plural='s', singular=''):
    """
    ses is pronounced "esses".

    Return a string suffix with a singular sense when *count* is 1 and a
    plural sense otherwise, e.g.::

        log.info("%d item%s found", items, ses(items))        # "1 item" / "0 items"
        log.info("%d famil%s found", n, ses(n, 'ies', 'y'))   # "1 family" / "10 families"

    The defaults cover pluralization of most English words, and the
    positional order makes other cases easy to override.
    """
    if count == 1:
        return singular
    return plural
import math
def fpart(x):
    """Return the fractional part of x (always in [0, 1), via floor)."""
    whole = math.floor(x)
    return x - whole
def host_dict(host):
    """Convert a host model object to a result dict (its ``state``);
    falsy hosts map to an empty dict."""
    return host.state if host else {}
def get_total_results(results):
    """Return the totalResults value from a Google Analytics API response.

    :param results: Google Analytics API results set (dict-like)
    :return: number of results, or None when the key is absent or falsy

    BUG FIX: the original indexed ``results['totalResults']`` directly in the
    condition, raising KeyError for responses without the key; ``.get`` keeps
    the None-on-falsy contract while tolerating a missing key.
    """
    total = results.get('totalResults')
    if total:
        return total
from typing import List
def _format_results(results: list) -> List[dict]:
"""
Args:
results: a list of results with the following format:
['"<hash>","<anchor_hash>",<score>']
Returns: a list of dictionaries containing the hash and score
"""
formatted_res = []
# Remove last empty value returned from the process
if not results[-1]:
results = results[:-1]
for result in results:
result = result.split(',')
formatted_res.append({
'hash': result[0].strip('"'),
'similarityValue': int(result[2])
})
return formatted_res | 2ce02ae0a5181c94a89b209735c1bba80deeae76 | 687,772 |
def textarea(
        rows="",
        span=2,
        placeholder="",
        htmlId=False,
        inlineHelpText=False,
        blockHelpText=False,
        focusedInputText=False,
        required=False,
        disabled=False,
        prepopulate=False):
    """
    *Generate a textarea - TBS style*
    **Key Arguments:**
    - ``rows`` -- the number of rows the text area should span
    - ``span`` -- column span
    - ``placeholder`` -- the placeholder text
    - ``htmlId`` -- html id for item
    - ``inlineHelpText`` -- inline and block level support for help text that appears around form controls
    - ``blockHelpText`` -- a longer block of help text that breaks onto a new line and may extend beyond one line
    - ``focusedInputText`` -- make the input focused by providing some initial editable input text
    - ``required`` -- required attribute if the field is not optional
    - ``disabled`` -- add the disabled attribute on an input to prevent user input
    **Return:**
    - ``textarea`` -- the textarea
    """
    # Each argument below is rewritten in place into the HTML fragment that
    # the final %-format (fed from locals()) interpolates.
    if span:
        span = "span%(span)s" % locals()
    else:
        span = ""
    # Help text: wrap in the TBS <span> markup when provided, else empty.
    if inlineHelpText:
        inlineHelpText = """<span class="help-inline">%(inlineHelpText)s</span>""" % locals(
        )
    else:
        inlineHelpText = ""
    if blockHelpText:
        blockHelpText = """<span class="help-block">%(blockHelpText)s</span>""" % locals(
        )
    else:
        blockHelpText = ""
    # A non-empty focusedInputText also switches the element id suffix.
    if not focusedInputText:
        focusedInputText = ""
        focusId = ""
    else:
        focusId = "focusedInput"
    if required:
        required = """required"""
    else:
        required = ""
    if disabled:
        disabled = """disabled"""
        disabledId = "disabledId"
    else:
        disabled = ""
        disabledId = ""
    # Fall back to a generic element name when no htmlId is given.
    if not htmlId:
        htmlId = ""
        name = "textarea"
    else:
        name = htmlId
    if prepopulate is False:
        prepopulate = ""
    # NOTE(review): the id attribute concatenates htmlId + focusId +
    # disabledId into one token, and <textarea> carries a non-standard
    # value="..." attribute — confirm both are intended.
    textarea = """<textarea rows="%(rows)s" class="%(span)s" id="%(htmlId)s%(focusId)s%(disabledId)s" value="%(focusedInputText)s" %(required)s %(disabled)s placeholder="%(placeholder)s" name="%(name)s">%(prepopulate)s</textarea>%(inlineHelpText)s%(blockHelpText)s""" % locals(
    )
    return textarea
def _construct_target_subscription(_target_id, _target_type='board'):
"""This function constructs a dictionary for an individual subscription target to be used in a payload.
.. versionadded:: 3.5.0
:param _target_id: The unique identifier for the target (e.g. Node ID)
:type _target_id: str
:param _target_type: The target type (``board`` by default)
:type _target_type: str
:returns: The dictionary for the individual target
:raises: :py:exc:`TypeError`
"""
_target = {
"type": "subscription",
"target": {
"type": _target_type,
"id": f"{_target_id}"
}
}
return _target | df66023aedc3306c8ffaa9f9dbccdb3811fc660b | 687,775 |
def monkey_patch(cls):
    """Return a method decorator to monkey-patch the given class.

    The decorated function is installed on *cls* under its own name; any
    previous attribute of that name is stashed on ``func.super``.
    """
    def _decorate(func):
        attr_name = func.__name__
        func.super = getattr(cls, attr_name, None)
        setattr(cls, attr_name, func)
        return func
    return _decorate
def pil_to_flatten_data(img):
    """
    Flatten a PIL image's pixel data:
    [(R1, G1, B1, A1), (R2, G2, B2, A2)] -> [R1, G1, B1, A1, R2, G2, B2, A2]
    """
    flat = []
    for pixel in img.convert('RGBA').getdata():
        flat.extend(pixel)
    return flat
def count_numeric(df):
    """
    Count the numeric (int64/float64) columns in the dataset.

    Parameters:
        df (pandas DataFrame): Dataset to perform the calculation on

    Returns:
        int: Number of numeric variables in df
    """
    numeric_dtypes = ('int64', 'float64')
    return sum(
        1 for idx in range(len(df.columns))
        if df.iloc[:, idx].dtype.name in numeric_dtypes
    )
def bahai_major(date):
    """Return the 'major' element (index 0) of a Bahai date, date."""
    major = date[0]
    return major
def check_alpha_signs(svm):
    """Return the set of training points that violate either condition:
    * all non-support-vector training points have alpha = 0
    * all support vectors have alpha > 0
    Assumes the SVM has support vectors assigned and all training points
    carry alpha values."""
    violators = set()
    svs = svm.support_vectors
    for point in svm.training_points:
        is_sv = point in svs
        if (is_sv and point.alpha <= 0) or (not is_sv and point.alpha != 0):
            violators.add(point)
    return violators
import random
def luck(n=2):
    """Give 1 chance out of n (default: 2) to return True.

    BUG FIX: the original returned ``bool(random.randint(0, n - 1))``, which
    is False with probability 1/n and True with probability (n-1)/n — the
    inverse of the documented behaviour.
    """
    assert n > 1
    return random.randint(0, n - 1) == 0
def convert_dict(my_dict):
    """Convert dictionaries from Netmiko format to NAPALM format.

    Renames 'host' -> 'hostname' and strips the vendor prefix from
    'device_type' (e.g. 'cisco_ios' -> 'ios').
    """
    converted = dict(my_dict)
    converted['hostname'] = converted.pop('host')
    vendor_type = converted.pop('device_type')
    converted['device_type'] = vendor_type.split('_')[1]
    return converted
def bspline(p, j, x):
    """
    Value at x in [0,1[ of the degree-p B-spline with integer nodes whose
    support starts at j, computed recursively via
    [De Boor's Algorithm](https://en.wikipedia.org/wiki/De_Boor%27s_algorithm):

    .. math::
        B_{i,0}(x) = 1 \\text{ if } t_i \\le x < t_{i+1}, \\text{ else } 0

    .. math::
        B_{i,p}(x) = \\frac{x - t_i}{t_{i+p} - t_i} B_{i,p-1}(x)
        + \\frac{t_{i+p+1} - x}{t_{i+p+1} - t_{i+1}} B_{i+1,p-1}(x)
    """
    assert 0.0 <= x <= 1.0
    assert type(p) is int and type(j) is int
    if p == 0:
        return 1.0 if j == 0 else 0.0
    left = (x - j) / p
    right = (x - j - 1) / p
    return left * bspline(p - 1, j, x) + (1 - right) * bspline(p - 1, j + 1, x)
def dict_partial_from_keys(keys):
    """Return a function that maps a value sequence to a dict with the
    predetermined *keys*."""
    def _make(values):
        return dict(zip(keys, values))
    return _make
def create_accounttax_sample(account_id, **overwrites):
    """Create a sample accounttax resource object for the accounttax samples.

    Args:
        account_id: int, Merchant Center ID these tax settings are for.
        **overwrites: accounttax attributes overriding the defaults.

    Returns:
        A new accounttax resource in dictionary form.
    """
    defaults = {
        'accountId': account_id,
        'rules': [{
            'country': 'US',
            'locationId': 21167,
            'useGlobalRate': True,
        }],
    }
    return {**defaults, **overwrites}
import shlex
def split_args(line):
    """Version of shlex.split that silently accepts incomplete strings.

    Parameters
    ----------
    line : str
        The string to split.

    Returns
    -------
    [str]
        The line split into separated arguments; an unterminated quote does
        not raise.
    """
    lexer = shlex.shlex(line, posix=True)
    lexer.whitespace_split = True
    lexer.commenters = ''
    tokens = []
    while True:
        try:
            token = next(lexer)
        except (ValueError, StopIteration):  # no closing quote / end of input
            break
        tokens.append(token)
    if lexer.token:
        tokens.append(lexer.token)
    return tokens
def delete_cluster(dataproc, project, region, cluster):
    """Tear down *cluster* via the dataproc client and return the operation."""
    print('Tearing down cluster.')
    return dataproc.delete_cluster(
        project_id=project,
        region=region,
        cluster_name=cluster,
    )
def ReshapeShortFat(original):
    """
    Flatten an image array into the "ShortFat" single-row shape, returning
    the flattened data together with the original three dimensions so the
    shape can be restored later.

    inputs:
        original - image array of shape (channel_0, channel_1, channel_2)
    return:
        (flat, channel_0, channel_1, channel_2) where flat is a 1-D array
        of length channel_0 * channel_1 * channel_2
    """
    dim0, dim1, dim2 = original.shape
    flat = original.reshape(1, dim0 * dim1 * dim2)
    return flat.squeeze(), dim0, dim1, dim2
def get_class_prob(predictions_dict):
    """Split predictions_dict into its true and predicted target dicts.

    Parameters
    ----------
    predictions_dict : dict
        Must contain "target_true" and "target_pred" keys whose values map
        class_label -> probability.

    Returns
    -------
    (true_dict, pred_dict) : tuple of dict

    Raises
    ------
    ValueError
        If either key is missing (or maps to None).
    """
    true_part = predictions_dict.get("target_true")
    pred_part = predictions_dict.get("target_pred")
    if true_part is None or pred_part is None:
        raise ValueError("Each element of predictions list must be a dict with target_true and target_pred keys")
    return true_part, pred_part
def generate_video_url(video_id: str) -> str:
    """Build a YouTube watch URL from a video id,
    e.g. https://www.youtube.com/watch?v=e3LqeN0e0as
    """
    return 'https://www.youtube.com/watch?v={}'.format(video_id)
import base64
def base64url_encode(msg):
    """
    Base64url-encode *msg* without padding, per the JWT spec, Appendix B
    ("Notes on implementing base64url encoding without padding").
    """
    encoded = base64.urlsafe_b64encode(msg)
    return encoded.rstrip(b'=')
def num_parameters(model):
    """
    Count the parameters in the given model.

    Parameters
    ----------
    model : torch.nn.Module
        The model to count the parameters of.

    Returns
    -------
    int
        Total number of parameter elements in the model.
    """
    return sum(p.numel() for p in model.parameters())
def simplex_dimension(simplex):
    """
    Get the dimension of a simplex (one less than its vertex count).

    :param simplex: Simplex defined by a list of vertex indices.
    :type simplex: List[int]
    :return: Dimension of the simplex.
    """
    vertex_count = len(simplex)
    return vertex_count - 1
def ssXXsuffix( i ):
    """Turns an integer into a '.calt.ssXX' ending between ss01 and ss20,
    e.g. 5 -> '.calt.ss05'.

    (The original docstring claimed '.ss05', omitting the '.calt' prefix
    the format string actually emits.) Values are taken modulo 21, and 0
    maps to 1.
    """
    i = i%21 # max 20
    if not i: # if 0, fall back to ss01
        i = 1
    return ".calt.ss%.2d" % ( i )
def decode_rgb565(val):
    """Decode a RGB565 uint16 into a RGB888 tuple."""
    red5 = (val >> 11) & 0x1f
    green6 = (val >> 5) & 0x3f
    blue5 = val & 0x1f
    # Expand each channel to 8 bits with rounding toward the full range.
    return (
        (red5 * 255 + 15) // 31,
        (green6 * 255 + 31) // 63,
        (blue5 * 255 + 15) // 31,
    )
from typing import OrderedDict
def quant_params_vec2dict(keys, vals, search_clipping=False):
    """
    Inverse of quant_params_dict2vec: rebuild the quantization-parameter
    dictionary that the post-training quantizer API can digest.

    When *search_clipping* is set, '<name>_min'/'<name>_max' key pairs are
    folded into a single '<name>' entry holding the sorted (low, high)
    clipping pair; otherwise each value is stored by absolute value.
    """
    rebuilt = OrderedDict()
    for position, key in enumerate(keys):
        if search_clipping:
            if key.endswith('_max'):
                continue  # consumed together with the matching '_min'
            if key.endswith('_min'):
                rebuilt[key[:-4]] = sorted((vals[position], vals[position + 1]))
                continue
        rebuilt[key] = abs(vals[position])
    return rebuilt
from bs4 import BeautifulSoup
def clean_data(data):
    """
    Strip HTML and clean spaces.

    :param data: HTML string (or bytes) to clean
    :return: UTF-8 encoded bytes of the text content, with runs of
        whitespace collapsed to single spaces
    """
    # BeautifulSoup with the lxml parser drops all markup; .text is the
    # concatenation of the remaining text nodes.
    data = BeautifulSoup(data, "lxml").text.strip()
    # split()/join collapses any whitespace runs (incl. newlines) to one space.
    data = ' '.join(data.split()).encode("utf-8")
    return data
import pkg_resources
def get_template(name):
    """
    Look for 'name' in the vr.runners.templates folder. Return its contents.

    >>> import six
    >>> tmpl = get_template('base_image.lxc')
    >>> isinstance(tmpl, six.string_types)
    True
    """
    resource_path = 'templates/' + name
    stream = pkg_resources.resource_stream('vr.imager', resource_path)
    raw_bytes = stream.read()
    return raw_bytes.decode('utf-8')
def service_exists(keystone, service_name):
    """Return True if a service with the given name already exists."""
    existing_names = (service.name for service in keystone.services.list())
    return service_name in existing_names
def material_type(rec):
    """Determine material type for record (arg1).

    Classification follows the MARC 21 leader: Leader/06 (type of record)
    and Leader/07 (bibliographic level) are read as l[1] and l[2] of the
    record's first element.

    Returns:
        A string, one of BK (books), CF (computer files), MP
        (maps), MU (music), CR (continuing resource), VM (visual
        materials), MX (mixed materials)

    Raises:
        ValueError: if the leader codes match no known material type
            (previously raised with no message at all).
    """
    l = rec[0]
    # Book: Leader/06 contains a (Language material) or t (Manuscript
    # language material) and Leader/07 contains a, c, d, or m
    if l[1] in ("a", "t") and l[2] in ("a", "c", "d", "m"):
        return "BK"
    # Computer File: Leader/06 contains code m
    if l[1] == "m":
        return "CF"
    # Map: Leader/06 contains e or f (cartographic material)
    if l[1] in ("e", "f"):
        return "MP"
    # Music / sound recordings: Leader/06 contains c, d, i, or j
    if l[1] in ("c", "d", "i", "j"):
        return "MU"
    # Continuing resources: language material with serial-ish level
    if l[1] == "a" and l[2] in ("b", "i", "s"):
        return "CR"
    # Visual materials: Leader/06 contains g, k, o, or r
    if l[1] in ("g", "k", "o", "r"):
        return "VM"
    # Mixed materials: Leader/06 contains code p
    if l[1] == "p":
        return "MX"
    raise ValueError("unknown material type: leader/06=%r" % l[1])
def cookies_string_to_dict(cookies_string):
    """
    Transform cookies of the string form "k1=v1; k2=v2" into a dict.

    Values may themselves contain '=' characters (e.g. base64 payloads);
    only the first '=' separates the name from the value.  (The previous
    implementation split on every '=', truncating such values.)

    Raises:
        ValueError: if cookies_string is empty or None.
        TypeError: if cookies_string is not a str.
    """
    if not cookies_string or cookies_string == '':
        raise ValueError("Invalid blank param of cookies_string !")
    if not isinstance(cookies_string, str):
        raise TypeError("Invalid type of cookies_string !")
    cookies_dict = {}
    for single_mapping_item in cookies_string.split(";"):
        single_mapping_item = single_mapping_item.strip().replace("\t", "").replace("\n", "")
        if '=' not in single_mapping_item:
            continue
        # Split on the first '=' only, so values containing '=' survive intact
        name, _, value = single_mapping_item.partition('=')
        cookies_dict[name] = value
    return cookies_dict
def classname(cls):
    """Return the name of a class."""
    name = cls.__name__
    return name
def normalize_lang(lang):
    """Normalize input languages string

    >>> from pyams_utils.i18n import normalize_lang
    >>> lang = 'fr,en_US ; q=0.9, en-GB ; q=0.8, en ; q=0.7'
    >>> normalize_lang(lang)
    'fr,en-us;q=0.9,en-gb;q=0.8,en;q=0.7'
    """
    normalized = lang.strip().lower()
    for old, new in (('_', '-'), (' ', '')):
        normalized = normalized.replace(old, new)
    return normalized
def doi_to_directory(doi):
    """Convert a doi string to a more directory-friendly name.

    Parameters
    ----------
    doi : string
        doi

    Returns
    -------
    doi : string
        doi with "/" and ":" each replaced by "-"
    """
    # One-pass character translation instead of chained replace() calls
    return doi.translate(str.maketrans({"/": "-", ":": "-"}))
def get_soil_texture_superclass_id(superclass: str):
    """Get soil texture superclass ID.

    Parameters
    ----------
    superclass : str
        Superclass from {L, S, T, U}.  Only the last character is
        inspected, so prefixed class strings map via their final letter.

    Returns
    -------
    int
        ID of superclass
    """
    mapping = {"L": 0, "S": 1, "T": 2, "U": 3}
    key = superclass[-1]
    return mapping[key]
from datetime import datetime
def write_fn(period: datetime = None, prefix: str = 'xbrlrss', ext: str = '.xml') -> str:
    """Write the filename with pattern prefix-YYYY-MM.xml

    Args:
        period (datetime, optional): Date from which year and month come.
            Defaults to the current time.  (Previously the default was
            evaluated once at import time, freezing `datetime.now()`.)
        prefix (str, optional): The prefix for the filename. Defaults to 'xbrlrss'.
        ext (str, optional): The extension of the file. Defaults to '.xml'.

    Raises:
        ValueError: The period is out-of-bound.

    Returns:
        str: String with pattern prefix-YYYY-MM.xml.
    """
    if period is None:
        period = datetime.now()
    # the xbrl rss from EDGAR begin in 2005-04. They are not available before.
    limits = (datetime(year=2005, month=4, day=1), datetime(2100, 1, 1))
    if period < limits[0] or period >= limits[1]:
        msg = f"The period must be between {limits[0]} and {limits[1]}.\nperiod: {period}"
        raise ValueError(msg)
    fn = '-'.join((prefix, str(period.year), str(period.month).zfill(2))) + ext
    assert len(fn) > len(ext) + 1
    return fn
def fullName(first: str, middle: str, last: str) -> str:
    """
    Compose parts of a name into a full name.
    """
    if not middle:
        return f"{first} {last}"
    return f"{first} {middle}. {last}"
from typing import List
from typing import Tuple
def write_inertia(inertia: List[Tuple[int, int]]):
    """
    Given an inertia decomposition as returned by `compute_inertia`, create the string
    version that is latex displayable.

    The caller's list is left unmodified (previously it was sorted in place).

    Example:
        >>> write_inertia([(2, 1), (2, 3), (1, 2), (1, 1)])
        '(1^2 1 2^3 2)'

    Args:
        inertia: The inertia representation as returned by `compute_inertia`
    """
    # sorted() returns a new list; list.sort() would mutate the argument
    ordered = sorted(inertia, key=lambda x: (x[0], -x[1]))
    elts = [f"{f}^{e}" if e > 1 else f"{f}" for f, e in ordered]
    return "(" + " ".join(elts) + ")"
def xyz_to_zyx(data, x=0, y=0, z=0):
    """
    Swap the first and third axes of a matrix.

    Assumes data is not flattened. If it is flattened, pass in y, x, z parameters.

    :param data: array-like with at least 3 dimensions
    :param y:
    :param x:
    :param z:
    """
    # NOTE(review): x/y/z are resolved from data.shape when left at 0 but are
    # never used afterwards — presumably kept for interface compatibility.
    if x == 0:
        x = data.shape[0]
    if y == 0:
        y = data.shape[1]
    if z == 0:
        z = data.shape[2]
    return data.swapaxes(0, 2)
def lowercase(raw_text: str) -> str:
    """
    Return *raw_text* with every character lower-cased.

    >>> lowercase("This is NeW YoRk wIth upPer letters")
    'this is new york with upper letters'
    """
    lowered = raw_text.lower()
    return lowered
def create_obj(d):
    """Create an object whose attributes are the key/value pairs of *d*."""
    dynamic_cls = type('D', (object,), d)
    return dynamic_cls()
def get_option(name, project, default=None):
    """
    Get an option flag from the project structure, returning *default*
    (None unless supplied) when the flag is not defined.
    """
    if 'options' not in project:
        return default
    options = project['options']
    if name in options:
        return options[name]
    return default
import re
def read_segmentation(word_boundary_path):
    """
    Args:
        word_boundary_path (str): path to a word boundary file
    Returns:
        segmentation_dict (dict):
            key (string): utt_index
            value (list): list of [start_frame, end_frame, word]
    """
    segmentation_dict = {}
    with open(word_boundary_path, 'r') as f:
        for raw_line in f:
            normalized = re.sub(r'[\s]+', ' ', raw_line.strip().lower().expandtabs(1))
            fields = normalized.split(' ')
            # fields[0] is e.g. 'speaker-uttindex'; keep the last component
            utt_index = fields[0].split('-')[-1]
            # Times are seconds; convert to 10ms frames (+0.05 rounds)
            start_frame = int(float(fields[1]) * 100 + 0.05)
            end_frame = int(float(fields[2]) * 100 + 0.05)
            word = fields[3].replace('[silence]', '')
            segmentation_dict.setdefault(utt_index, []).append(
                [start_frame, end_frame, word])
    return segmentation_dict
import re
def fixSlashes(url):
    """
    Remove double slashes in URLs, and ensure the protocol is double-slashed.
    """
    # insert the missing slash after ':/' when only one follows the scheme
    url = re.sub(r'(:/)([^/])', r'\1/\2', url)
    # collapse any remaining '//' that is not part of '://'
    return re.sub(r'([^:])//', r'\1/', url)
import secrets
import binascii
def generate_consul_key(unique=False):
    """
    Generate a consul gossip encryption key (base64 of 32 random bytes).

    https://www.consul.io/docs/security/encryption

    Key generated per the following description:
    https://github.com/hashicorp/consul/blob/b3292d13fb8bbc8b14b2a1e2bbae29c6e105b8f4/command/keygen/keygen.go

    NOTE(review): the `unique` parameter is accepted but never used —
    confirm with callers whether it can be dropped.
    """ # noqa
    raw = secrets.token_bytes(32)
    encoded = binascii.b2a_base64(raw)
    return encoded.decode("utf-8").strip()
def find_index(token, low, high, features):
    """
    Binary search for `token` in the sorted `features` list, restricted to
    indices [low, high].

    Returns the matching index, or -1 if absent.  O(log n), whereas
    np.where() would need O(n).  Iterative rather than recursive.
    """
    while high >= low:
        mid = int((high + low) / 2)
        pivot = features[mid]
        if pivot == token:
            return mid
        if pivot > token:
            high = mid - 1
        else:
            low = mid + 1
    return -1
def _calc_target_shape(target_range):
"""Returns the shape of the target image."""
return tuple((target_range[:, 1] - target_range[:, 0]).astype(int)) | b62c079653cd3ed4fd16eee09179b72d2ce6e2a8 | 687,876 |
def _convert(x):
"""Generically convert strings to numbers.
:param x: string that maybe represents a number
:returns: value
:rtype: string, float, or int
"""
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return x | 14ac5fdee202adabc8aac927bac9a36ca0b3d806 | 687,878 |
def is_imported_from_same_module(the_class: type, imported_name: str) -> bool:
    """
    Was the class imported from the same module it was defined in?

    (The previous docstring claimed the opposite — "true if ... imported
    from another module" — and annotated `the_class` as str even though a
    class object is required for `__module__`.)

    :param the_class: the class object itself
    :param imported_name: dotted name under which the class was imported
    :return: True if the module prefix of `imported_name` matches the
        module the class was defined in
    """
    # Drop the final component (the class name) to recover the module path.
    module_prefix = ".".join(imported_name.split(".")[:-1])
    return module_prefix == the_class.__module__
def checkPrefix(s, prefix):
    """
    Return a pair (hasPrefix, rest).

    If prefix is a prefix of s:
        hasPrefix is True and rest is everything after the prefix.
    Otherwise:
        hasPrefix is False and rest is s unchanged.
    """
    has_prefix = s.startswith(prefix)
    rest = s[len(prefix):] if has_prefix else s
    return (has_prefix, rest)
import io
import wave
def buffer_to_wav(buffer: bytes) -> bytes:
    """Wrap a buffer of raw audio data (16-bit, 16Khz mono) in a WAV container."""
    with io.BytesIO() as wav_buffer:
        writer: wave.Wave_write = wave.open(wav_buffer, mode="wb")
        with writer:
            writer.setnchannels(1)      # mono
            writer.setsampwidth(2)      # 16-bit samples
            writer.setframerate(16000)  # 16 kHz
            writer.writeframes(buffer)
        return wav_buffer.getvalue()
import torch
def discriminator_loss(logits_r, logits_m, logits_f, logits_f_prime):
    """
    Compute the discriminator loss from the real, mixed and two fake
    score tensors, averaged over the batch.

    Inputs:
    - logits_r: PyTorch Variable of shape (N,) giving scores for the real data.
    - logits_m, logits_f, logits_f_prime: scores for mixed and fake data.

    Returns:
    - loss: PyTorch Variable containing (scalar) the loss for the discriminator.
    """
    fake_term = 0.5 * (torch.log(1 - logits_f) + torch.log(1 - logits_f_prime))
    mixed_term = 0.5 * (torch.log(1 - logits_m) + fake_term)
    per_sample = torch.log(logits_r) + mixed_term
    return per_sample.mean()
import toml
def parse_toml_file(file_object):
    """Parse TOML data from a file-like object into a dictionary.

    Args:
        file_object: (file-like object) file-like object opened on a
            TOML-formatted file.

    Returns:
        A dictionary with the parsed TOML data fields.
    """
    parsed = toml.load(file_object)
    return parsed
def escape_delimit(s):
    """
    Frame *s* with 0x7e delimiters and escape bytes 0x7e, 0x7d,
    0x11 (XON) and 0x13 (XOFF).

    0x7e and 0x7d become 0x7d 0x5e and 0x7d 0x5d; 0x11 and 0x13 become
    0x7d 0x31 and 0x7d 0x33.  0x7e marks packet start/end; 0x7d escapes
    the following byte by inverting bit 5.

    example:
        40 09 00 be ef 05 7d 06 01 02 03 04 05
    becomes:
        7e 40 09 00 be ef 05 7d 5d 06 01 02 03 04 05 7e
    """
    DELIM = chr(0x7e)
    ESCAPE = chr(0x7d)
    special = {0x7e, 0x7d, 0x11, 0x13}
    out = [DELIM]
    for ch in s:
        code = ord(ch)
        if code in special:
            out.append(ESCAPE)
            out.append(chr(code ^ 32))  # invert bit 5
        else:
            out.append(ch)
    out.append(DELIM)
    return "".join(out)
def csv_file(tmp_path):
    """Generate a csv file for test purposes.

    Args:
        tmp_path: temporary directory (pathlib.Path) to write files into

    Returns:
        path to the generated csv file
    """
    tmp_path.mkdir(exist_ok=True)
    target = tmp_path / "file.csv"
    target.write_text("x,y\n0,0\n1,1\n")
    return target
from typing import Tuple
def calculate_center_coords(
    cell_index: Tuple[int, int], cell_size: Tuple[int, int]
) -> Tuple[int, int]:
    """Calculate cell center coordinates.

    :param cell_index: selected cell index
    :param cell_size: given cell size (height, width)
    :return: given cell center coordinates (y, x)
    """
    y_center = int(cell_size[0] * (cell_index[0] + 0.5))
    x_center = int(cell_size[1] * (cell_index[1] + 0.5))
    return y_center, x_center
def cleanup_comment(comment):
    """Given a dictionary of a comment record, return a new dictionary for
    output as JSON.

    The input dictionary is left unmodified.  (Previously the record was
    mutated in place, contradicting this contract.)
    """
    comm_data = dict(comment)  # shallow copy: do not mutate the caller's dict
    comm_data["id"] = str(comm_data["_id"])
    comm_data["user"] = str(comm_data["user"])
    comm_data["post"] = str(comm_data["post"])
    comm_data["created"] = str(comm_data["created"].ctime())
    del comm_data["_id"]
    return comm_data
def tag2String(tag):
    """Takes a tag placement, and turns it into a verbose string"""
    values = (tag['id'], tag['x'], tag['y'], tag['th_deg'])
    template = "Tag with ID #%d, was placed @ (%f,%f), facing %f deg"
    return template % values
def get_level_size(slide, level):
    """Return the dimensions of the given level, as stored in
    slide.level_dimensions."""
    dims = slide.level_dimensions
    return dims[level]
import torch
def regularization_loss(params, weight):
    """Compute regularization loss.

    Args:
        params: iterable of all parameters
        weight: weight for the regularization term

    Returns:
        the regularization loss (weight times the sum of parameter norms)
    """
    total_norm = sum(torch.norm(param) for param in params)
    return weight * total_norm
def format_network_speed(raw_bps=0):
    """ Format a network speed test result in human readable form.

    Args:
        raw_bps: speed in bits per second.

    Returns:
        e.g. "2.00 Kb/s".  Speeds beyond the largest unit are reported in
        Gb/s (previously such values raised an IndexError when the unit
        index ran past the end of the table).
    """
    fmt = ['b/s', 'Kb/s', 'Mb/s', 'Gb/s']
    index = 0
    speed = raw_bps
    # Stop at the largest known unit to avoid running off the end of fmt.
    while speed > 1024 and index < len(fmt) - 1:
        index += 1
        speed /= 1024
    return "%0.2f %s" % (speed, fmt[index])
def derivative_from_polycoefficients(coeff, loc):
    """
    Return the derivative of the polynomial

        f(x) = coeff[0] + coeff[1]*x + coeff[2]*x**2 + ...

    evaluated at x = loc.
    """
    # Term n contributes n * coeff[n] * loc**(n-1); the constant term drops out.
    terms = (n * c * loc ** (n - 1) for n, c in enumerate(coeff) if n != 0)
    return sum(terms, 0.)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.