content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def bin_to_str(val):
    """Convert an integer to its binary representation as a string of 1s and 0s.

    Uses ``format`` so negative inputs yield '-101'-style output instead of
    the 'b101' garbage produced by slicing ``bin()``'s prefix.
    """
    # format(val, 'b') == bin(val)[2:] for val >= 0 and handles the sign.
    return format(val, 'b')
import signal
def register_signal_handler(sig, handler):
    """Install *handler* for signal *sig*; return the previously installed handler."""
    previous = signal.signal(sig, handler)
    return previous
def AfterTaxIncome(combined, expanded_income, aftertax_income,
                   Business_tax_expinc, corp_taxliab):
    """
    Calculates after-tax expanded income.

    Parameters
    ----------
    combined: combined tax liability
    expanded_income: expanded income
    aftertax_income: ignored; kept for calculator-interface compatibility
    Business_tax_expinc: policy switch; when truthy, imputed corporate tax
        liability is added to expanded income
    corp_taxliab: imputed corporate tax liability

    Returns
    -------
    aftertax_income: expanded_income minus combined
    """
    # Truthiness instead of `is True` so numpy bools / 0-1 ints also enable
    # the adjustment; the original's no-op else-branch is dropped.
    if Business_tax_expinc:
        expanded_income = expanded_income + corp_taxliab
    return expanded_income - combined
def _find_tbl_starts(section_index, lines):
"""
next three tables are the hill, channel, outlet
find the table header lines
:param section_index: integer starting index of section e.g.
"ANNUAL SUMMARY FOR WATERSHED IN YEAR"
:param lines: loss_pw0.txt as a list of strings
:return: hill0, chn0, out0
index to line starting the hill, channel, and outlet table
"""
header_indx = []
for i, L in enumerate(lines[section_index + 2:]):
if L.startswith('----'):
header_indx.append(i)
hill0 = header_indx[0] + 1 + section_index + 2
chn0 = header_indx[1] + 2 + section_index + 2 # channel and outlet summary
out0 = header_indx[2] + 2 + section_index + 2 # have a blank line before the data
return hill0, chn0, out0 | a64feb7ccf444c8ec57928ce4a6756143ed2a11d | 682,572 |
def decode_list_index(list_index: bytes) -> int:
    """Decode a big-endian byte-encoded list index (from a key path) to an int."""
    decoded = int.from_bytes(list_index, byteorder='big')
    return decoded
def mode_decomposition(plant):
    """Decompose a transfer function into per-mode parameters.

    Parameters
    ----------
    plant : TransferFunction
        The transfer function with at least one pair of complex poles.

    Returns
    -------
    wn : array
        Frequencies (rad/s).
    q : array
        Q factors.
    k : array
        DC gains of the modes.
    """
    all_poles = plant.pole()
    # Keep one pole per conjugate pair (positive imaginary part only).
    upper_half = all_poles[all_poles.imag > 0]
    wn = abs(upper_half)                  # natural frequencies
    q = wn / (-2 * upper_half.real)       # quality factors of the modes
    k = abs(plant(1j * wn) / q)           # DC gains of the modes
    return wn, q, k
def assign_province_road_conditions(x):
    """Assign road conditions as paved or unpaved to Province roads.

    Parameters
        x - Pandas DataFrame row with:
            - code - Numeric code for type of asset
            - level - Numeric code for level of asset
    Returns
        String value 'paved' or 'unpaved'
    """
    # Expressways / national roads (codes 17, 303) and level 0/1 assets
    # are considered paved.
    if x.code in (17, 303) or x.level in (0, 1):
        return 'paved'
    # Anything else not included above.
    return 'unpaved'
import ast
def guess_type(string):
    """
    Guess the type of a value given as string and return it accordingly.

    Parameters
    ----------
    string : str
        given string containing the value
    """
    text = str(string)
    try:
        # literal_eval parses ints, floats, tuples, lists, dicts, bools, None.
        return ast.literal_eval(text)
    except Exception:  # SyntaxError or ValueError
        return text
from typing import List
def split_high_level(s: str,
                     c: str,
                     open_chars: List[str],
                     close_chars: List[str]
                     ) -> List[str]:
    """Splits input string by delimiter c.

    Splits input string by delimiter c, excluding occurrences of c that
    appear inside blocks delimited by open_chars/close_chars.

    Fixes vs. the original: the first character of the string is now
    inspected (it was previously skipped), and a single-character trailing
    segment is no longer dropped.

    :param s: string to split
    :type s: str
    :param c: delimiter character
    :type c: str
    :param open_chars: list of block opening characters
    :type open_chars: List[str]
    :param close_chars: list of block closing characters
    :type close_chars: List[str]
    :return: list of strings containing the split blocks
    :rtype: List[str]
    """
    splits: List[str] = []
    depth = 0
    start = 0
    for index, char in enumerate(s):
        if char in open_chars:
            depth += 1
        elif char in close_chars:
            depth -= 1
        elif depth == 0 and char == c:
            splits.append(s[start:index])
            start = index + 1
    if start < len(s):
        splits.append(s[start:])
    return splits
def tonumpy(img):
    """
    Convert a torch image map to a numpy image map.
    The value range is not changed, only the axis layout.

    :param img: tensor, shape (C, H, W) or (H, W)
    :return: numpy array, shape (H, W, C) or (H, W)
    """
    if img.dim() == 2:
        return img.cpu().detach().numpy()
    # Move the channel axis last: (C, H, W) -> (H, W, C).
    return img.permute(1, 2, 0).cpu().detach().numpy()
import re
def substitute_query_params(query, params):
    """
    Substitutes placeholders of the form ${n} (where n is a non-negative
    integer) in the query. ${n} is substituted with params[n] to generate
    the final query.
    """
    # Raw-string pattern avoids the invalid escape sequences ("\$", "\d")
    # the original emitted warnings for on modern Python; re.sub with a
    # callback replaces findall + repeated str.replace.
    return re.sub(r"\$\{(\d+)\}", lambda match: params[int(match.group(1))], query)
def group_text(text, n=5):
    """Groups the given text into n-letter groups separated by spaces."""
    chunks = (''.join(text[start:start + n]) for start in range(0, len(text), n))
    return ' '.join(chunks)
def p_length_unit(units):
    """Returns length units string as expected by pagoda weights and measures module.

    Raises AssertionError (same type as before) for unsupported units, but
    via an explicit raise that survives ``python -O``, unlike the original
    bare ``assert(False)``.
    """
    # NB: other length units are supported by resqml
    u = units.lower()
    if u in ['m', 'metre', 'metres']:
        return 'metres'
    if u in ['ft', 'foot', 'feet', 'ft[us]']:
        return 'feet'
    raise AssertionError(f'unsupported length units: {units}')
from typing import Callable
def fname(func: Callable) -> str:
    """Return fully-qualified function name."""
    return f"{func.__module__}.{func.__name__}"
def getNrOfDictElements(thisdict):
    """
    Will get the total number of entries in a given dictionary.

    Argument: the source dictionary (each value being an iterable group)
    Output  : an integer
    """
    # One count per element of each group, mirroring the original nested
    # loop (works for any iterable values, not only sized ones).
    return sum(1 for group in thisdict.values() for _ in group)
import random
def mutate_agent(agent_genome, max_mutations=3):
    """
    Applies 1 - `max_mutations` point mutations to the given road trip.
    A point mutation swaps the order of two waypoints in the road trip.
    """
    genome = list(agent_genome)
    last = len(genome) - 1
    # NOTE: random.randint call order matches the original exactly, so
    # seeded runs reproduce the same mutated genomes.
    for _ in range(random.randint(1, max_mutations)):
        i = random.randint(0, last)
        j = i
        while i == j:
            j = random.randint(0, last)
        genome[i], genome[j] = genome[j], genome[i]
    return tuple(genome)
from typing import Sequence
def strictly_increasing(s: Sequence):
    """Returns true if sequence s is monotonically (strictly) increasing."""
    return all(s[i] < s[i + 1] for i in range(len(s) - 1))
import random
def generate_number(digits):
    """Generate a random integer with exactly the given number of digits."""
    lower = 10 ** (digits - 1)   # smallest `digits`-digit number
    upper = 10 ** digits - 1     # largest `digits`-digit number
    return random.randint(lower, upper)
def _applescriptify_str(text):
"""Replace double quotes in text for Applescript string"""
text = text.replace('"', '" & quote & "')
text = text.replace('\\', '\\\\')
return text | 591883251dde88c21ec3f01285a113814676c745 | 682,618 |
def unpack_spectrum(HDU_list):
    """
    Unpacks and extracts the relevant parts of an SDSS HDU list object.

    Parameters
    ----------
    HDU_list : astropy HDUlist object

    Returns
    -------
    wavelengths : ndarray
        Wavelength array (10 ** loglam)
    flux : ndarray
        Flux array
    z : float
        Redshift of the galaxy (field 63 of the first row of HDU 2)
    """
    # The primary header (HDU_list[0].header) was read but never used in
    # the original; that dead assignment is removed.
    z = HDU_list[2].data[0][63]
    wavelengths = 10 ** HDU_list[1].data['loglam']
    flux = HDU_list[1].data['flux']
    return wavelengths, flux, z
import re
def is_stable_version(version):
    """
    Return True if version is stable, i.e. WITHOUT letters in its final
    component (the original docstring had this inverted).

    Stable version examples: ``1.2``, ``1.3.4``, ``1.0.5``.
    Non-stable version examples: ``1.3.4beta``, ``0.1.0rc1``, ``3.0.0dev0``.
    """
    if not isinstance(version, tuple):
        version = version.split('.')
    last_part = version[-1]
    # Stable iff no ASCII letter appears in the last component.
    return re.search(r'[a-zA-Z]', last_part) is None
def read_symfile(path='baserom.sym'):
    """
    Return a list of dicts of label data from an rgbds .sym file.

    Each entry has keys 'label', 'bank' and 'address'; bank and address are
    parsed as hexadecimal integers. ';' comments and blank lines are skipped.
    """
    symbols = []
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(path) as symfile:
        for line in symfile:
            line = line.strip().split(';')[0]  # strip comments
            if line:
                bank_address, label = line.split(' ')[:2]
                bank, address = bank_address.split(':')
                symbols.append({
                    'label': label,
                    'bank': int(bank, 16),
                    'address': int(address, 16),
                })
    return symbols
def cert_bytes(cert_file_name):
    """
    Parses a pem certificate file into raw bytes and appends null character.

    Reading stops at the first NUL byte, mirroring the original
    byte-by-byte loop, but the truncation is done in a single pass.
    """
    with open(cert_file_name, "rb") as pem:
        data = pem.read()
    # Everything before the first NUL, then null-terminated certs, for
    # compatibility.
    return list(data.split(b"\0", 1)[0]) + [0]
def cupy_cuda_MemoryPointer(cp_arr):
    """Return cupy.cuda.MemoryPointer view of cupy.ndarray.

    This is the device-memory pointer backing the array (``cp_arr.data``);
    no data is copied.
    """
    return cp_arr.data
def extend(*dicts):
    """Create a new dictionary from multiple existing dictionaries
    without mutating any of the inputs.

    NOTE(review): despite the original "without overwriting" wording,
    later dictionaries DO override earlier ones on duplicate keys
    (plain dict.update semantics); only the input dicts themselves are
    left untouched.
    """
    new_dict = {}
    for each in dicts:
        new_dict.update(each)
    return new_dict
def mult(x, y):
    """
    multiply : 2 values
    """
    product = x * y
    return product
def combine_on_sep(items, separator):
    """
    Combine each item with each other item on a `separator`.

    Args:
        items (list): A list or iterable that remembers order.
        separator (str): The separator the items will be combined on.

    Returns:
        list: A list with all the combined items as strings.
    """
    # A single item (or none) cannot be combined with anything; return as-is.
    if len(items) <= 1:
        return items
    # Convert the empty string back to minus.
    normalized = ['-' if item == '' else item for item in items]
    combined = []
    for i, item in enumerate(normalized):
        others = normalized[:i] + normalized[i + 1:]
        combined.extend(separator.join((item, other)) for other in others)
    return combined
def swapPos(list:list, pos1:int, pos2:int):
    """
    Swap two elements in list (in place). Return the modified list.
    """
    first = list[pos1]
    list[pos1] = list[pos2]
    list[pos2] = first
    return list
import csv
def team2machine(cur_round):
    """Compute the map from teams to machines for the given round.

    Reads configs/config_round_<cur_round>.csv, whose rows are
    (team name, machine) pairs.
    """
    # Get the config with the teamname-source mappings.
    config_name = f"configs/config_round_{cur_round}.csv"
    team_machine = {}
    # newline='' per the csv module docs for correct newline handling;
    # empty rows are skipped to avoid IndexError on blank lines.
    with open(config_name, 'r', newline='') as infile:
        for row in csv.reader(infile):
            if row:
                team_machine[row[0]] = row[1]
    return team_machine
import torch
def systematic_sampling(weights: torch.Tensor) -> torch.Tensor:
    """Sample ancestral indices using systematic resampling.

    From https://docs.pyro.ai/en/stable/_modules/pyro/infer/smcfilter.html#SMCFilter

    Args:
        weights: particle weights, tensor [batch_shape, num_particles].
            (The original docstring called this ``log_weight``, but the code
            cumsums the raw values, so they are treated as plain weights —
            presumably normalized to sum to 1; TODO confirm with callers.)

    Returns:
        zero-indexed ancestral index: LongTensor [batch_shape, num_particles]
    """
    with torch.no_grad():
        batch_shape, size = weights.shape[:-1], weights.size(-1)
        # Cumulative weights scaled by the particle count, shifted by ONE
        # shared uniform offset per batch (systematic, not multinomial).
        n = weights.cumsum(-1).mul_(size).add_(torch.rand(batch_shape + (1,), device=weights.device))
        n = n.floor_().long().clamp_(min=0, max=size)
        # Histogram: how many offspring each cumulative bucket receives.
        diff = torch.zeros(batch_shape + (size + 1,), dtype=torch.long, device=weights.device)
        diff.scatter_add_(-1, n, torch.tensor(1, device=weights.device, dtype=torch.long).expand_as(weights))
        # Prefix-sum of counts yields the ancestor index for each particle.
        ancestors = diff[..., :-1].cumsum(-1).clamp_(min=0, max=size-1)
        return ancestors
def get_column_as_list(matrix, column_no):
    """
    Retrieves a column from a matrix (list of rows) as a list.
    """
    # Comprehension replaces the index-based loop; same order and result.
    return [row[column_no] for row in matrix]
def _GetDefault(t):
"""Returns a string containing the default value of the given type."""
if t.element_type or t.nullable:
return None # Arrays and optional<T> are default-initialized
type_map = {
'boolean': 'false',
'double': '0',
'DOMString': None, # std::string are default-initialized.
}
assert t.name in type_map, 'Type %s not found' % t.name
return type_map[t.name] | 8fadb2db6211fa18fc2dc4d1306bd68113d2bb4f | 682,641 |
import configparser
def get_downstream_distgit_branch(dlrn_projects_ini):
    """Get downstream distgit branch info from DLRN projects.ini"""
    parser = configparser.ConfigParser()
    parser.read(dlrn_projects_ini)
    return parser.get('downstream_driver', 'downstream_distro_branch')
def get_defined_enums(conn, schema):
    """
    Return a dict mapping PostgreSQL enumeration types to the set of their
    defined values.

    :param conn:
        SQLAlchemy connection instance.
    :param str schema:
        Schema name (e.g. "public").
    :returns dict:
        {
            "my_enum": frozenset(["a", "b", "c"]),
        }
    """
    sql = """
    SELECT
        pg_catalog.format_type(t.oid, NULL),
        ARRAY(SELECT enumlabel
              FROM pg_catalog.pg_enum
              WHERE enumtypid = t.oid)
    FROM pg_catalog.pg_type t
    LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
    WHERE
        t.typtype = 'e'
        AND n.nspname = %s
    """
    return {
        type_name: frozenset(labels)
        for type_name, labels in conn.execute(sql, (schema,))
    }
def find_valid_edges(components, valid_edges):
    """Find all edges between two components in a complete undirected graph.

    Args:
        components: A [V]-shaped array of boolean component ids. This assumes
            there are exactly two nonempty components.
        valid_edges: An uninitialized array where output is written. On return,
            the subarray valid_edges[:end] will contain edge ids k for all
            valid edges.

    Returns:
        The number of valid edges found.
    """
    edge_id = 0  # id of the (v1, v2) pair in enumeration order
    count = 0    # number of cross-component edges found so far
    for v2, c2 in enumerate(components):
        for v1 in range(v2):
            # XOR is true exactly when the endpoints are in different components.
            if c2 ^ components[v1]:
                valid_edges[count] = edge_id
                count += 1
            edge_id += 1
    return count
def find_column_by_utype(table, utype):
    """
    Given an astropy table derived from a VOTABLE, this function returns
    the first Column object that has the given utype.
    The name field of the returned value contains the column name and can be
    used for accessing the values in the column.

    Parameters
    ----------
    table : astropy.table.Table
        Astropy Table created from a VOTABLE (as if by
        astropy_table_from_votable_response).
    utype : str
        The utype identifying the column to be found.

    Returns
    -------
    astropy.table.Column
        The first column found with the given utype; None if no such column.

    Example
    -------
    col = find_column_by_utype(my_table, 'Access.Reference')
    print ('1st row access_url value is:', my_table[col.name][0])
    """
    # Scan the columns in order until a matching utype is found.
    for column_name in table.columns:
        candidate = table.columns[column_name]
        declared = candidate.meta.get('utype')
        if declared is not None and declared == utype:
            return candidate
    return None
def calc_node_attr(node, edge):
    """
    Returns a tuple of (cltv_delta, min_htlc, fee_proportional) values of the node in the channel.
    """
    if node == edge['node1_pub']:
        policy = edge['node1_policy']
    elif node == edge['node2_pub']:
        policy = edge['node2_policy']
    else:
        raise Exception('node ' + node + ' is not a peer to channel ' + edge['channel_id'])
    return policy['time_lock_delta'], policy['min_htlc'], policy['fee_rate_milli_msat']
def format_mac(mac: str) -> str:
    """
    Format the mac address string into dash-separated lowercase form.
    Helper function from homeassistant.helpers.device_registry.py
    """
    candidate = mac
    if len(candidate) == 17 and candidate.count("-") == 5:
        # Already dash-separated: just lowercase.
        return candidate.lower()
    if len(candidate) == 17 and candidate.count(":") == 5:
        candidate = candidate.replace(":", "")
    elif len(candidate) == 14 and candidate.count(".") == 2:
        candidate = candidate.replace(".", "")
    if len(candidate) == 12:
        # Bare 12 hex digits: insert a dash every two characters.
        pairs = (candidate.lower()[i: i + 2] for i in range(0, 12, 2))
        return "-".join(pairs)
    # Not sure how it is formatted; return the original unchanged.
    return mac
def present(element, act):
    """Return act[element] if act is truthy and contains element, else False.

    Note: the returned value is whatever is stored under the key, which may
    itself be falsy (e.g. 0 or None).
    """
    if not act or element not in act:
        return False
    return act[element]
def format_output(tosave, formatter):
    """
    Applies the formatting function, formatter, on tosave and guarantees
    that the resulting string ends with a newline.

    :param tosave: The item to be string serialized
    :param formatter: The formatter function applied to item
    :return: The formatted string after formatter has been applied to tosave
    """
    text = formatter(tosave)
    return text if text.endswith('\n') else text + '\n'
def _update_query(table: str, field: str, is_jsonb: bool, from_string: str,
to_string: str) -> str:
"""Generates a single query to update a field in a PostgresSQL table.
Args:
table (str): The table to update.
field (str): The field to update.
is_jsonb (bool): Whether the field is jsonb or not.
from_string (str): The string to replace.
to_string (str): The string to replace with.
Returns:
str: The query to update the field.
"""
const = f'UPDATE {table} SET "{field}" = replace({field}' # noqa
if is_jsonb:
query = f'{const}::TEXT, \'{from_string}\', \'{to_string}\')::jsonb;'
else:
query = f'{const}, \'{from_string}\', \'{to_string}\');'
return query | 1d90246cfd464b40f3235310e97d2ad38c30fcb3 | 682,657 |
def generateFilenameFromUrl(url):
    """
    Transforms a card URL into a local filename in the format
    imageCache/setname_cardnumber.jpg.
    """
    # Drop the first path component, join the rest with underscores.
    path_parts = url.split('/')[1:]
    return 'imageCache/' + '_'.join(path_parts)
import shutil
def move(src_path, dest_path, raise_error=False):
    """
    Moves a file or folder from ``src_path`` to ``dest_path``. If either the
    source or the destination path no longer exist, this function does
    nothing. Any other exceptions are either raised or returned if
    ``raise_error`` is False.
    """
    error = None
    try:
        shutil.move(src_path, dest_path)
    except FileNotFoundError:
        # Do nothing if source or dest path no longer exist.
        pass
    except OSError as exc:
        error = exc
    if raise_error and error:
        raise error
    return error
def unblock_list(blocked_ips_list, to_block_list):
    """Create the list of IPs that are currently blocked by the firewall but
    absent from the new block list — i.e. the ones to unblock.

    :param blocked_ips_list: List of dicts with an 'ip' key (currently blocked).
    :param to_block_list: List of new blockings ({'host': {'ip_address': ...}}).
    :return: List of IPs to be unblocked, in their original order.
    """
    # Build the lookup set once: O(n + m) instead of the original O(n * m)
    # nested scan; order and duplicates of blocked_ips_list are preserved.
    still_blocked = {host['host']['ip_address'] for host in to_block_list}
    return [entry['ip'] for entry in blocked_ips_list
            if entry['ip'] not in still_blocked]
import csv
import re
def get_keys_from_file(file):
    """
    Reads a file and creates a list of dictionaries.

    :param file: Filename relative to project root.
    :return: lkeys - a list of dictionaries
        [{
          Key: '00484545-2000-4111-9000-611111111111',
          Region: 'us-east-1'
        },
        {
          Key: '00484545-2000-4111-9000-622222222222',
          Region: 'us-east-1'
        }]
    """
    # Compiled once outside the row loop (UUIDv4-shaped token).
    uuid4_re = re.compile('[0-9A-Za-z]{8}-[0-9A-Za-z]{4}-4[0-9A-Za-z]{3}-[89ABab][0-9A-Za-z]{3}-[0-9A-Za-z]{12}')
    lkeys = []
    with open(file, newline='') as csvfile:
        for row in csv.reader(csvfile, delimiter=' ', quotechar='|'):
            if len(row) > 3 and uuid4_re.search(row[2]):
                lkeys.append({'Key': row[2], 'Region': row[0]})
    return lkeys
def file_list(redis, task_id):
    """Returns the list of files attached to a task"""
    return redis.hkeys("files:" + task_id)
def _parse_quad_str(s):
"""Parse a string of the form xxx.x.xx.xxx to a 4-element tuple of integers"""
return tuple(int(q) for q in s.split('.')) | 79dcb2bffad831a7f60fd471ea5f4b747e693d1e | 682,676 |
def flatten_dict(dct, separator='-->', allowed_types=(int, float, bool)):
    """Returns a list of string identifiers for each element in dct.

    Recursively scans through dct and finds every element whose type is in
    allowed_types and adds a string identifier for it. Keys starting with
    '__' are skipped.

    eg:
        dct = {
            'a': 'a string',
            'b': {
                'c': 1.0,
                'd': True
            }
        }
        flatten_dict(dct) would return
        ['b-->c', 'b-->d']
        ('a' is excluded because str is not in the default allowed_types.)

    Fixes vs. the original: `separator` and `allowed_types` are now passed
    down the recursion (custom values previously applied only to the top
    level), and the mutable default list is replaced by a tuple.
    """
    flat_list = []
    for key in sorted(dct):
        if key[:2] == '__':
            continue
        key_type = type(dct[key])
        if key_type in allowed_types:
            flat_list.append(str(key))
        elif key_type is dict:
            sub_list = flatten_dict(dct[key], separator=separator,
                                    allowed_types=allowed_types)
            flat_list += [str(key) + separator + sub for sub in sub_list]
    return flat_list
def _parse_tersoff_line(line):
"""
Internal Function.
Parses the tag, parameter and final value of a function parameter
from an atomicrex output line looking like:
tersoff[Tersoff].lambda1[SiSiSi]: 2.4799 [0:]
Returns:
[(str, str, float): tag, param, value
"""
line = line.strip().split()
value = float(line[1])
info = line[0].split("].")[1]
info = info.split("[")
param = info[0]
tag = info[1].rstrip("]:")
return tag, param, value | df984a7e6dafb17b7a90400bc2320765327bbbbe | 682,683 |
def find_IPG_hits(group, hit_dict):
    """Finds all hits to query sequences in an identical protein group.

    Args:
        group (list): Entry namedtuples from an IPG from parse_IP_groups().
        hit_dict (dict): All Hit objects found during cblaster search.
            Matching entries are pop()ed from this dict (it is mutated).

    Returns:
        List of all Hit objects corresponding to any proteins in a IPG,
        keeping at most one hit per distinct query.
    """
    queries_seen = set()
    results = []
    for entry in group:
        candidates = hit_dict.pop(entry.protein_id, None)
        if candidates is None:
            continue
        for hit in candidates:
            if hit.query not in queries_seen:
                queries_seen.add(hit.query)
                results.append(hit)
    return results
def get_mc_filepath(asm_path):
    """
    Get the filepath for the machine code.

    This is the assembly filepath with .asm replaced with .mc
    (the final four characters are assumed to be the ".asm" suffix).

    Args:
        asm_path (str): Path to the assembly file.
    Returns:
        str: Path to the machine code file.
    """
    return asm_path[:-4] + ".mc"
import re
def encode(name, system='NTFS'):
    """
    Encode the name for a suitable name in the given filesystem
    >>> encode('Test :1')
    'Test _1'
    """
    assert system == 'NTFS', 'unsupported filesystem'
    # Characters NTFS forbids in names: <>:"/\|?* plus control chars 0-31.
    forbidden = r'<>:"/\|?*' + ''.join(map(chr, range(32)))
    matcher = re.compile('|'.join(re.escape(ch) for ch in forbidden))
    return matcher.sub('_', name)
from itertools import tee
def pairs(iterable):
    """
    Return a new iterable over sequential pairs in the given iterable.
    i.e. (0,1), (1,2), ..., (n-2,n-1)

    Args:
        iterable: the iterable to iterate over the pairs of
    Returns: a new iterator over the pairs of the given iterator
    """
    # Duplicate the iterator, advance the second copy by one element, then
    # zip: zip stops as soon as the (shorter) advanced copy is exhausted.
    first, second = tee(iterable)
    next(second, None)
    return zip(first, second)
import re
def convert_spaces_and_special_characters_to_underscore(name):
    """ Converts spaces and special characters to underscore so 'Thi$ i# jun&' becomes 'thi__i__jun_'

    :param name: A string
    :return: An altered string

    Example use case:
        - A string might have special characters at the end when they are really the same field such as My_Field$ and My_Field#
        - We use this to covert the names to "my_field" to combine the values so the events will be easily grouped together

    Examples:
        .. code-block:: python
            # Example #1
            input_string = '$Scr "get-rid^-of-the@" special #characters%&space'
            output_string = convert_spaces_and_special_characters_to_underscore(input_string)
            output_string = '_scr__get_rid__of_the___special__characters__space'
    """
    # [\W_] matches every character that is not a letter or digit.
    return re.sub(r'[\W_]', '_', name).lower()
def uid_already_processed(db, notification_uid):
    """Check if the notification UID has already been processed.

    Args:
        db: Firestore-like client exposing ``document(uid).get()``.
        notification_uid: Document path of the notification.
    Returns:
        bool: True when the document exists.
    """
    # The snapshot's `exists` flag already answers the question; the
    # original's `if/else True/False` indirection is dropped.
    return bool(db.document(notification_uid).get().exists)
def count_matched_numbers(a:list, b:list):
    """
    Count the positions where the element of ``a`` equals the element of
    ``b`` at the same index, after coercing b's element to the type of a's
    element at that index.

    Returns the count as an int, or False when no positions match.
    Quirks preserved from the original contract: an empty ``b`` returns 0
    (not False), extra elements of the longer list are ignored, and
    indexes where the type coercion fails are simply skipped.
    (The original docstring claimed a list was returned; it never was.)

    Example:
        >>> count_matched_numbers([1, 2, 3], [1, 2, 3])
        3
        >>> count_matched_numbers([1, 2, 3], [1, 5, 3, 4, 5])
        2
        >>> count_matched_numbers([1, 2, 3], [4,5,6])
        False
    """
    matching = 0
    length_of_b = len(b)
    if len(b) == 0:
        return 0
    for key,value_1 in enumerate(a):
        if key>= length_of_b:
            break # len(a)>len(b) AND we reached the limit
        the_type = type(value_1) # getting the type of the element in first list
        try:
            value_2 = the_type(b[key]) # casting types
        except Exception as e:
            continue # Sometimes casting may fail (e.g. int('x')); skip this index
        # Now we are sure that value_1 and value_2 have the same type
        if value_1 == value_2:
            matching += 1
    if matching:
        return matching
    else:
        return False
import binascii
def from_bytes(bytes):
    """Reverse of to_bytes(): big-endian bytes -> unsigned int.

    Uses int.from_bytes, which works on all supported Pythons and, unlike
    the old hexlify round-trip, returns 0 for empty input instead of
    raising ValueError.
    """
    return int.from_bytes(bytes, byteorder='big')
def runningMean(oldMean, newValue, numValues):
    # type: (float, float, int) -> float
    """
    A running (incremental) mean.

    Parameters
    ----------
    oldMean : float
        The old running average mean
    newValue : float
        The new value to be added to the mean
    numValues : int
        The number of values in the new running mean once this value is included

    Returns
    -------
    newMean : float
        The new running average mean

    Notes
    -----
    Based on Donald Knuth's Art of Computer Programming, Vol 2, page 232,
    3rd edition, via https://www.johndcook.com/blog/standard_deviation/

    Examples
    --------
    >>> runningMean(1, 2, 2)
    1.5
    >>> runningMean(1.5, 3, 3)
    2.0
    """
    delta = newValue - oldMean
    return oldMean + delta / numValues
from typing import Tuple
from typing import Optional
def validate_parse_directions(verb: str, direction: str) -> Tuple[Optional[str], Optional[str]]:
    """Parse north, south, east, west to n, s, e, w; for invalid input
    return a tuple like (None, some validation message).

    The comparison is now case-insensitive, so '[N]orth' works as the
    docstring always promised (the original rejected uppercase input).
    """
    if not direction:
        return None, "Missing direction"
    # Shorten e.g. 'North' to 'n'.
    initial = direction[0].lower()
    if initial in "nsew":
        return verb, initial
    return None, "Bad direction, you idiot! Only n, s, e, w!"
def read_file_lines(filename):
    """Reads the lines of a file as a list (newline characters preserved)."""
    with open(filename, mode="r") as handle:
        return list(handle)
def get_list_hashes(lst):
    """
    For LZ to be more effective than the worst-case 'fresh' raw dump, you
    need to lz-encode a minimum of 4 bytes. Break the list into 4-element
    windows and hash each one (the lz haystack).

    Parameters
    ----------
    lst : list
        Input list to form hashes from.

    Returns
    -------
    list of (n, hash)
        One enumerated hash per starting position; windows near the end of
        the list are shorter than 4 elements.
    """
    assert len(lst) > 0, 'Empty list in list hashes'
    return [(start, hash(tuple(lst[start:start + 4]))) for start in range(len(lst))]
def remove_ext(filename: str, ext: str) -> str:
    """
    Remove a file extension. No effect if the provided extension is missing.
    A leading dot on *ext* is optional.
    """
    if not ext.startswith('.'):
        ext = '.' + ext
    if filename.endswith(ext):
        return filename[:-len(ext)]
    return filename
def parse_to_dicts(lines, containers):
    """
    Parses a list of lines into (name, dict) tuples placed in the given
    containers.

    The list of lines has the following format
        token1: tval1
        key1: val11
        key2: val12
        token1: tval2
        key1: val21
        key2: val22

    :param lines: "key: value" strings
    :param containers: a dictionary { token : array_instance }
    :return: containers, each token list extended with (name, item) tuples
    """
    pairs = [(key, value.strip())
             for key, value in (line.split(':', 1) for line in lines)]
    item = {}
    kind, name = None, None
    for position, (key, value) in enumerate(pairs):
        if key in containers:
            # A new section begins: flush the previous one first.
            if position != 0:
                containers[kind].append((name, item))
                item = {}
            kind, name = key, value
        else:
            item[key] = value
    if kind is not None:
        containers[kind].append((name, item))
    return containers
def is_number(s):
    """
    Test if a string is an int or float literal.

    Note: floats are only recognized when a '.' is present, so scientific
    notation like '1e5' is rejected — matching the original behavior.

    :param s: input string (word)
    :type s: str
    :return: bool
    """
    converter = float if "." in s else int
    try:
        converter(s)
        return True
    except ValueError:
        return False
def get_locale_identifier(tup, sep='_'):
    """The reverse of :func:`parse_locale`. It creates a locale identifier out
    of a ``(language, territory, script, variant)`` tuple. Items can be set to
    ``None`` and trailing ``None`` values can also be left out of the tuple.

    >>> get_locale_identifier(('de', 'DE', None, '1999'))
    'de_DE_1999'

    .. versionadded:: 1.0

    :param tup: the tuple as returned by :func:`parse_locale`.
    :param sep: the separator for the identifier.
    """
    # Pad to exactly four fields, then emit in identifier order
    # (language, script, territory, variant), skipping empty parts.
    padded = (tuple(tup) + (None,) * 4)[:4]
    lang, territory, script, variant = padded
    parts = [part for part in (lang, script, territory, variant) if part]
    return sep.join(parts)
def to_channel_last(tensor):
    """Move the channel axis (dim 0) to the last position: (C, H, W) -> (H, W, C)."""
    rearranged = tensor.permute(1, 2, 0)
    return rearranged
def _is_file_a_directory(f):
"""Returns True is the given file is a directory."""
# Starting Bazel 3.3.0, the File type as a is_directory attribute.
if getattr(f, "is_directory", None):
return f.is_directory
# If is_directory is not in the File type, fall back to the old method:
# As of Oct. 2016, Bazel disallows most files without extensions.
# As a temporary hack, Tulsi treats File instances pointing at extension-less
# paths as directories. This is extremely fragile and must be replaced with
# logic properly homed in Bazel.
return (f.basename.find(".") == -1) | d2d98fcdcc4b8baabd3846a2b1c0e93553f48eb6 | 682,728 |
def correct_bounding_box(x1, y1, x2, y2):
    """ Corrects the bounding box so the coordinates run small to big.

    Returns [xmin, ymin, xmax, ymax].
    """
    # min/max replace the original four-way if/else ladder; same result.
    return [min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)]
def train_test_split_features(data_train, data_test, zone, features):
    """Returns a pd.DataFrame with the explanatory variables and
    a pd.Series with the target variable, for both train and test data.

    Args:
        data_train (pd.DataFrame): Train data set.
        data_test (pd.DataFrame): Test data set.
        zone (int): The zone id (id of the wind farm).
        features (list): A list of the column names to be used.

    Returns:
        (pd.DataFrame): Explanatory variables of train data set.
        (pd.DataFrame): Explanatory variables of test data set.
        (pd.Series): Target variable of train data set.
        (pd.Series): Target variable of test data set.
    """
    X_train = data_train[data_train.ZONEID == zone][features]
    y_train = data_train[data_train.ZONEID == zone].TARGETVAR
    X_test = data_test[data_test.ZONEID == zone][features]
    y_test = data_test[data_test.ZONEID == zone].TARGETVAR
    return X_train, X_test, y_train, y_test | 889889fc1c8ee4ba2758e35f70a8d23d66b5274d | 682,736 |
import torch
def create_random_binary_mask(features):
    """
    Creates a random binary mask of a given dimension with half of its entries
    randomly set to 1s.

    :param features: Dimension of mask.
    :return: Binary mask with half of its entries set to 1s, of type torch.Tensor.
    """
    mask = torch.zeros(features).byte()
    # Ceil(features / 2) positions receive a 1; the rest stay 0.
    n_ones = (features + 1) // 2
    chosen = torch.multinomial(
        torch.ones(features).float(), num_samples=n_ones, replacement=False
    )
    mask[chosen] += 1
    return mask
def pbar(val, maxval, empty='-', full='#', size=50):
    """
    return a string that represents a nice progress bar

    Parameters
    ----------
    val : float
        The fill value of the progress bar
    maxval : float
        The value at which the progress bar is full
    empty : str
        The character used to draw the empty part of the bar
    full : str
        The character used to draw the full part of the bar
    size : integer
        The length of the bar expressed in number of characters

    Returns
    -------
    br : str
        The string containing the progress bar. The percentage field is six
        characters wide (space-padded), and the filled portion is truncated
        to whole characters.

    Examples
    --------
    >>> a = pbar(35.5, 100, size=5)
    >>> print(a)
     35.50% [#----]
    """
    # First format injects the pad character and bar width into the template;
    # the second fills in the bar string and the percentage value.
    br = "{{1: 6.2f}}% [{{0:{}<{}s}}]".format(empty, size)
    br = br.format(full*int(size*val/maxval), val*100/maxval)
    return br | ff3bfa9a170bce1a25adfef1df792550b4c20326 | 682,739 |
def hasShapeType(items, sort):
    """
    Detect whether any item in ``items`` has a truthy attribute named ``sort``.
    Returns :attr:`True` or :attr:`False`.

    :param items: :class:`list` of objects to inspect
    :param sort: name of the shape-type attribute to look up on each item
    """
    # Generator expression (instead of a materialized list) lets any()
    # short-circuit on the first truthy attribute.
    return any(getattr(item, sort) for item in items)
def reestructure_areas(config_dict):
    """Ensure that all [Area_0, Area_1, ...] keys are consecutive.

    Keys are renumbered in their numeric order. The previous plain string
    sort placed ``Area_10`` before ``Area_2``, which scrambled (and could
    clobber) entries once ten or more areas existed.

    :param config_dict: mapping that may contain ``Area_<n>`` keys
    :return: the same dict, with area keys renumbered ``Area_0..Area_k``
    """
    area_names = [x for x in config_dict.keys() if x.startswith("Area_")]
    # Sort by the numeric suffix, not lexicographically.
    area_names.sort(key=lambda name: int(name.split("_", 1)[1]))
    for index, area_name in enumerate(area_names):
        if f"Area_{index}" != area_name:
            config_dict[f"Area_{index}"] = config_dict[area_name]
            config_dict.pop(area_name)
    return config_dict
def get_section_markups(document, sectionLabel):
    """Given a ConTextDocument and sectionLabel, return an ordered list of the
    ConTextmarkup objects in that section, ordered by sentence number."""
    ordered = []
    for _, target, attrs in document.getDocument().out_edges(sectionLabel, data=True):
        if attrs.get('category') == 'markup':
            ordered.append((attrs['sentenceNumber'], target))
    # Stable sort on sentence number preserves insertion order for ties.
    ordered.sort(key=lambda pair: pair[0])
    return [target for _, target in ordered]
def absolute_min(array):
    """
    Return the element of ``array`` whose absolute value is smallest.

    Ties keep the earliest element and an empty input raises ``ValueError``
    (both behaviors inherited from :func:`min`).

    :param array: the array.
    :return: element closest to zero

    >>> absolute_min([1, -2, 5, -8, 7])
    1
    >>> absolute_min([1, -2, 3, -4, 5])
    1
    """
    return min(array, key=lambda value: abs(value))
def serialize_drive(drive) -> str:
    """
    Serialize the drive residues and calibration state to xml.

    :param drive: object exposing a ``state_to_xml()`` method
    :return: the XML document produced by ``drive.state_to_xml()``
    """
    return drive.state_to_xml()
def return_list_smart(func):
    """
    Decorator. If the wrapped function returns a list of length 1, the single
    element is returned instead; an empty/falsy result becomes ``None``.

    Uses :func:`functools.wraps` so the wrapped function keeps its name and
    docstring (the original decorator discarded both).
    """
    from functools import wraps

    @wraps(func)
    def inner(*args, **kwargs):
        out = func(*args, **kwargs)
        if not out:
            return None
        if len(out) == 1:
            return out[0]
        return out

    return inner
def vec_bin(v, nbins, f, init=0):
    """
    Accumulate elements of a vector into bins.

    Parameters
    ----------
    v: list[]
        A vector of scalar values
    nbins: int
        The number of bins (accumulators) that will be returned as a list.
    f: callable
        A function f(v)->(i, val) mapping an element into "bin space": it
        returns the bin index ``i`` to increment and the increment ``val``.
        If no bin shall be incremented, ``i`` is None.
    init: scalar, optional
        A scalar value used to initialize the bins. Defaults to 0.

    Returns
    -------
    list
        A list of bins with the accumulated values.
    """
    bins = [init for _ in range(nbins)]
    for element in v:
        index, increment = f(element)
        if index is None:
            continue
        bins[index] += increment
    return bins
import requests
from bs4 import BeautifulSoup
def get_soup(url):
    """
    Fetch ``url`` over HTTP and return the parsed page.

    :param url: address of the page to download
    :return: ``BeautifulSoup`` tree built with the ``html.parser`` backend
    """
    response = requests.get(url)
    markup = response.text.encode("utf-8")
    return BeautifulSoup(markup, 'html.parser')
import functools
def if_inactive(f):
    """Decorator for callback methods so that they run only while inactive.

    The wrapped method executes only when ``self.active`` is falsy; otherwise
    the call is swallowed and ``None`` is returned.
    """
    @functools.wraps(f)
    def inner(self, loop, *args, **kwargs):
        if self.active:
            return None
        return f(self, loop, *args, **kwargs)
    return inner
import asyncio
import functools
def async_test(loop=None):
    """Wrap an async test so it runs to completion on an event loop.

    :param loop: event loop driving the coroutine; defaults to the loop
        returned by :func:`asyncio.get_event_loop` at decoration time.
    :return: decorator turning an ``async def`` test into a blocking call
    """
    event_loop = loop or asyncio.get_event_loop()

    def decorate(func):
        """Closure capturing the configured loop."""
        @functools.wraps(func)
        def runner(*args, **kwargs):
            return event_loop.run_until_complete(func(*args, **kwargs))
        return runner

    return decorate
def get_strains(output_file):
    """
    Return a dictionary mapping cell id to strain id.

    Parses Biocellion output: every line of the form
    ``Cell:<id>,Strain:<id>[,...]`` contributes one entry; all other lines
    are ignored.

    :param output_file: path to the Biocellion output file
    :return: dict of ``{cell_id: strain_id}``
    """
    strain_map = {}
    with open(output_file, 'r') as handle:
        for line in handle:
            if not line.startswith("Cell:"):
                continue
            fields = line.split(',')
            cell_id = int(fields[0].split(':')[1])
            strain_id = int(fields[1].split(':')[1])
            strain_map[cell_id] = strain_id
    return strain_map
def is_iterable(x):
    """Return True when *x* is iterable, except for str/bytes/bytearray,
    which are treated as scalars and yield False."""
    if isinstance(x, (str, bytes, bytearray)):
        return False
    return hasattr(x, "__iter__")
def bprop_scalar_log(x, out, dout):
    """Backpropagator for primitive `scalar_log`.

    Since d/dx log(x) = 1/x, the incoming gradient ``dout`` is scaled by
    1/x. The forward output ``out`` is unused.
    """
    gradient = dout / x
    return (gradient,)
def _get_drive_distance(maps_response):
"""
from the gmaps response object, extract the driving distance
"""
try:
return maps_response[0].get('legs')[0].get('distance').get('text')
except Exception as e:
print(e)
return 'unknown distance' | ca0fabec0931fd8816a7ca7554b81a19a84c4010 | 682,765 |
def calc_tcp(gamma, td_tcd, eud):
    """Tumor Control Probability / Normal Tissue Complication Probability.

    Computes ``1.0 / (1.0 + (td_tcd / eud) ** (4.0 * gamma))``.

    Parameters
    ----------
    gamma : float
        Gamma_50
    td_tcd : float
        Either TD_50 or TCD_50
    eud : float
        equivalent uniform dose

    Returns
    -------
    float
        TCP or NTCP
    """
    dose_ratio = td_tcd / eud
    return 1.0 / (1.0 + dose_ratio ** (4.0 * gamma))
def get_enrollment(classroom, enrolled_only=False):
    """Gets the list of students that have enrolled.

    Args:
      classroom: The classroom whose enrollment to get.
      enrolled_only: If True, only return the set that are enrolled. Default is
          False.

    Returns:
      Set of Enrollment entries for the classroom, capped at
      ``classroom.max_enrollment`` results.
    """
    query = classroom.enrollment_set
    if enrolled_only:
        query = query.filter('is_enrolled =', True)
    return query.fetch(classroom.max_enrollment)
def get_price_for_market_state_crypto(result):
    """Returns the price info for the current market state of a crypto symbol."""
    # Crypto never sleeps, so the REGULAR market fields always apply.
    field_map = {
        "current": 'regularMarketPrice',
        "previous": 'regularMarketPreviousClose',
        "change": 'regularMarketChange',
        "percent": 'regularMarketChangePercent',
    }
    return {label: result[field]['fmt'] for label, field in field_map.items()}
from datetime import datetime
def get_latest_archive_url(data: dict) -> str:
    """
    Use the given metadata to find the archive URL of the latest image.

    The newest entry is assumed to be the last element of ``data``.
    """
    latest = data[-1]
    taken_at = datetime.strptime(latest["date"], "%Y-%m-%d %H:%M:%S")
    day_dir = f"{taken_at.year:04}/{taken_at.month:02}/{taken_at.day:02}"
    return f"https://epic.gsfc.nasa.gov/archive/natural/{day_dir}/png/{latest['image']}.png"
import shlex
def generate_input_download_command(url, path):
    """Build the shell command that downloads ``url`` to ``path``.

    Both arguments are shell-quoted to keep the command injection-safe.
    """
    quoted_url = shlex.quote(url)
    quoted_path = shlex.quote(path)
    return f"python /home/ft/cloud-transfer.py -v -d {quoted_url} {quoted_path}"
def _calculate_shrinking_factor(initial_shrinking_factor: float, step_number: int, n_dim: int) -> float:
"""The length of each in interval bounding the parameter space needs to be multiplied by this number.
Args:
initial_shrinking_factor: in each step the total volume is shrunk by this amount
step_number: optimization step -- if we collected only an initial batch, this step is 1
n_dim: number of dimensions
Example:
Assume that ``initial_shrinking_factor=0.5`` and ``step_number=1``. This means that the total volume should
be multiplied by :math:`1/2`. Hence, if there are :math:`N` dimensions (``n_dim``), the length of each
bounding interval should be multiplied by :math:`1/2^{1/N}`.
However, if ``step_number=3``, each dimension should be shrunk three times, i.e. we need to multiply it by
:math:`1/2^{3/N}`.
Returns:
the shrinking factor for each dimension
"""
assert 0 < initial_shrinking_factor < 1, (
f"Shrinking factor must be between 0 and 1. " f"(Was {initial_shrinking_factor})."
)
assert step_number >= 1 and n_dim >= 1, (
f"Step number and number of dimensions must be greater than 0. "
f"(Where step_number={step_number}, n_dim={n_dim})."
)
return initial_shrinking_factor ** (step_number / n_dim) | f8f99842e91ba59ba345e7bd2d608b0b0c56bef0 | 682,780 |
def calc_grv(thickness, height, area, top='slab', g=False):
    """Calculate GRV for given prospect.

    Args:
        thickness [float]: average thickness of reservoir
        height [float]: height of hydrocarbon column
        area [float]: area of hydrocarbon prospect
        top: structure shape, one of `{'slab', 'round', 'flat'}`
        g [float]: geometric correction factor; when falsy/omitted it is
            derived from ``top`` and the thickness/height ratio

    Returns:
        tuple of (grv, g) where ``grv = thickness * area * g``
    """
    if not g:
        ratio = thickness / height
        # Slope of the geometric correction per structure shape;
        # 'slab' (or anything unrecognized) uses g = 1.
        slope = {'round': -0.6, 'flat': -0.3}.get(top)
        g = slope * ratio + 1 if slope is not None else 1
    return thickness * area * g, g
import struct
def decode_override_information(data_set):
    """decode result from override info

    :param bytes data_set: bytes returned by the system parameter query
        command R_RI for override info -- three big-endian uint32 percent
        values (feed, speed, rapid)
    :returns: dictionary with override info values scaled to fractions
    :rtype: dict
    """
    feed, speed, rapid = struct.unpack('!3L', data_set[0:12])
    return {
        'Feed_override': feed / 100,
        'Speed_override': speed / 100,
        'Rapid_override': rapid / 100,
    }
def is_link_displayed(link: str, source: str):
    """Check if the link is explicitly displayed in the source.

    Matches either the full link as anchor text (``>link</a>``) or the link
    truncated at its first dot (``>prefix``).

    Args:
        link: a string containing the link to find in the webpage source code.
        source: the source code of the webpage.

    Returns:
        True if the link is visible in the webpage, False otherwise.
    """
    full_anchor = '>' + link + '</a>'
    truncated_anchor = '>' + link[:link.find('.')]
    return full_anchor in source or truncated_anchor in source
import codecs
def decode_utf_8_text(text):
    """Decode ``text`` from utf-8, returning it unchanged when that fails.

    Parameters
    ----------
    text : bytes or str
        Value to be decoded.

    Returns
    -------
    str
        Decoded string, or the original value when it cannot be decoded.
    """
    try:
        return codecs.decode(text, 'utf-8')
    except (UnicodeDecodeError, TypeError):
        # The previous bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only decoding failures (invalid bytes or a non-bytes
        # input such as str) are expected here.
        return text
def create_user2(django_user_model):
    """
    Create a second test user via the given user model.

    :param django_user_model: model class exposing ``objects.create_user``
    :return: the newly created user instance
    """
    return django_user_model.objects.create_user(
        username='user_2', email='another@gmail.com', password='pass123'
    )
from datetime import datetime
def _time_stamp_filename(fname, fmt='%Y-%m-%d_{fname}'):
"""
Utility function to add a timestamp to names of uploaded files.
Arguments:
fname (str)
fmt (str)
Returns: str
"""
return datetime.now().strftime(fmt).format(fname=fname) | f5b5780b86a1dc958ce6dc012b3fe84f49a19bdb | 682,795 |
def _format_size(x: int, sig_figs: int = 3, hide_zero: bool = False) -> str:
"""
Formats an integer for printing in a table or model representation.
Expresses the number in terms of 'kilo', 'mega', etc., using
'K', 'M', etc. as a suffix.
Args:
x (int) : The integer to format.
sig_figs (int) : The number of significant figures to keep
hide_zero (bool) : If True, x=0 is replaced with an empty string
instead of '0'.
Returns:
str : The formatted string.
"""
if hide_zero and x == 0:
return str("")
def fmt(x: float) -> str:
# use fixed point to avoid scientific notation
return "{{:.{}f}}".format(sig_figs).format(x).rstrip("0").rstrip(".")
if abs(x) > 1e14:
return fmt(x / 1e15) + "P"
if abs(x) > 1e11:
return fmt(x / 1e12) + "T"
if abs(x) > 1e8:
return fmt(x / 1e9) + "G"
if abs(x) > 1e5:
return fmt(x / 1e6) + "M"
if abs(x) > 1e2:
return fmt(x / 1e3) + "K"
return str(x) | db72f2e6db8ef32ad9ce011e8aac8d6877c0f8f8 | 682,797 |
def normalize(df, col_name, replace=True):
    """Min-max normalize a numeric column of a DataFrame.

    Applies ``z = (x - min(x)) / (max(x) - min(x))`` to ``df[col_name]``.

    Args:
        df: the DataFrame to operate on
        col_name: name of the column to normalize
        replace: if True (default), modify ``df`` in place and return it;
            otherwise return a normalized copy and leave ``df`` untouched.
    """
    values = df[col_name]
    scaled = (values - values.min()) / (values.max() - values.min())
    target = df if replace else df.copy()
    target[col_name] = scaled
    return target
import torch
def csv_collator(samples):
    """Merge a list of samples to form a batch.

    The batch is a 2-element tuple: a BxHxW tensor of the stacked images and
    a list of the per-sample dictionaries.

    :param samples: List of samples returned by CSVDataset as (img, dict) tuples.
    """
    # NOTE: samples with 0 objects used to be dropped here (WHD undefined);
    # currently every sample is kept.
    images = [img for img, _ in samples]
    dictionaries = [meta for _, meta in samples]
    return torch.stack(images), dictionaries
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.