content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def is_draft(record, ctx):
    """Shortcut for links to determine if a record is a draft.

    :param record: the record being linked.
    :param ctx: link context (unused).
    :return: the record's ``is_draft`` flag.
    """
    return getattr(record, "is_draft")
246ed4e954daa693f6d128278fbeb89c914b5f1a
682,279
def approx_equal(a_value: float, b_value: float, tolerance: float = 1e-4) -> bool:
    """Approximate (relative) equality test.

    Checks whether the two values are within ``tolerance`` of each other,
    relative to the larger magnitude of the two.

    @param a_value: first value
    @param b_value: second value
    @param tolerance: relative tolerance (default 1e-4)
    @return: True if the values are within the given relative tolerance
    """
    # Fixed the return annotation: the comparison yields a bool, not a float.
    # Relative tolerance scaled by the larger magnitude, like math.isclose.
    return abs(a_value - b_value) <= max(abs(a_value), abs(b_value)) * tolerance
e081bdd07c18a3163035028303e9707284f5a75a
682,280
def create_zero_matrix(rows: int, columns: int) -> list:
    """Create a ``rows`` x ``columns`` matrix where each element is zero.

    e.g. rows = 2, columns = 2 --> [[0, 0], [0, 0]]

    :param rows: a number of rows
    :param columns: a number of columns
    :return: a matrix of zeros, or [] for invalid (non-int or bool) arguments
    """
    # bool is a subclass of int, so it has to be rejected explicitly.
    args_valid = (isinstance(rows, int) and isinstance(columns, int)
                  and not isinstance(rows, bool) and not isinstance(columns, bool))
    if not args_valid:
        return []
    # Mirror the original guard: zero (or negative) columns yield [].
    if not [0] * columns:
        return []
    return [[0] * columns for _ in range(rows)]
4e4a759d318f33f962a1d924c4893d5af7043b94
682,281
def _fix_attribute_names(attrs, change_map): """ Change attribute names as per values in change_map dictionary. Parameters ---------- :param attrs : dict Dict of operator attributes :param change_map : dict Dict of onnx attribute name to mxnet attribute names. Returns ------- :return new_attr : dict Converted dict of operator attributes. """ new_attr = {} for k in attrs.keys(): if k in change_map: new_attr[change_map[k]] = attrs[k] else: new_attr[k] = attrs[k] return new_attr
857c8e2eaf4652bb6a5f6207dd0a332ad43971cf
682,282
def strip_token(token) -> str:
    """Strip off a suffix substring starting at the first '[' (or '.').

    :param token: token string
    :return: stripped token if a suffix is found, the same token otherwise
    """
    # Tokens that *start* with '.' or '[' are returned untouched.
    if token[:1] in (".", "["):
        return token
    cut = token.find("[")
    if cut < 0:
        # No '[' present; fall back to the first '.'.
        cut = token.find(".")
    # cut == 0 cannot occur here (leading '.'/'[' handled above), but keep
    # the original non-positive guard.
    return token if cut <= 0 else token[:cut]
d8fe892b51f08b52a3de43acef7b636f553adc13
682,287
def str_to_tile_index(s, index_of_a=0xe6, index_of_zero=0xdb, special_cases=None):
    """Convert a string to a series of tile indexes.

    Params:
        s: the string to convert
        index_of_a: beginning of alphabetical tiles
        index_of_zero: beginning of numerical tiles
        special_cases: what to do if a character is not alpha-numerical;
            can be:
                None - non alpha-numerical characters are skipped
                callable - called for each non alpha-numerical character
                dict - a correspondence table between characters and indexes
    """
    indexes = []
    for ch in s:
        if 'a' <= ch <= 'z':
            idx = ord(ch) - ord('a') + index_of_a
        elif 'A' <= ch <= 'Z':
            idx = ord(ch) - ord('A') + index_of_a
        elif '0' <= ch <= '9':
            idx = ord(ch) - ord('0') + index_of_zero
        elif callable(special_cases):
            idx = special_cases(ch)
        elif special_cases is not None:
            idx = special_cases.get(ch)
        else:
            idx = None
        # None (unknown character, no mapping) is silently skipped.
        if idx is not None:
            indexes.append(idx)
    return indexes
2835743b026beb78edc561758bcffc19a87ff67e
682,288
import csv


def load_data(filename):
    """Load shopping data from a CSV file ``filename`` and convert into a
    list of evidence lists and a list of labels.

    Returns a tuple ``(evidence, labels)`` where each evidence entry holds,
    in column order: the integer columns, the float columns, Month as an
    index 0 (January) to 11 (December), VisitorType as 1 (returning) / 0,
    and Weekend as 1 (true) / 0. Each label is 1 if Revenue is true, else 0.
    """
    evidence, labels = [], []
    int_columns = [
        'Administrative', 'Informational', 'ProductRelated', 'Month',
        'OperatingSystems', 'Browser', 'Region', 'TrafficType',
        'VisitorType', 'Weekend',
    ]
    float_columns = [
        'Administrative_Duration', 'Informational_Duration',
        'ProductRelated_Duration', 'BounceRates', 'ExitRates',
        'PageValues', 'SpecialDay',
    ]
    # NOTE: the source data spells June as "June" (not "Jun").
    months = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5,
              'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9, 'Nov': 10, 'Dec': 11}
    with open(filename) as csvfile:
        for row in csv.DictReader(csvfile):
            # Normalize the categorical columns in place.
            row['Month'] = months[row['Month']]
            row['VisitorType'] = 1 if row['VisitorType'] == 'Returning_Visitor' else 0
            row['Weekend'] = 1 if row['Weekend'] == 'TRUE' else 0
            row['Revenue'] = 1 if row['Revenue'] == 'TRUE' else 0
            for column in int_columns:
                row[column] = int(row[column])
            for column in float_columns:
                row[column] = float(row[column])
            # Revenue is the last CSV column: evidence is everything before it.
            values = list(row.values())
            evidence.append(values[:-1])
            labels.append(values[-1])
    return (evidence, labels)
535072672faceda545f46793154b807f18fdd490
682,294
def edges_from_matchings(matching):
    """Identify all edges within a matching.

    Parameters
    ----------
    matching : list of all matchings returned by the matching api

    Returns
    -------
    edges : list of unique (node_i, node_j, 0) edge tuples, in first-seen
        order, drawn from matchings with confidence > 0.95
    """
    # Removed the leftover debug print of the match counter, and use a set
    # for O(1) de-duplication instead of scanning the edges list each time.
    edges = []
    seen = set()
    for match in matching:
        # Only address high-confidence matchings.
        if match['confidence'] > 0.95:
            # Look through all legs and extract consecutive node pairs.
            for leg in match['legs']:
                nodes = leg['annotation']['nodes']
                for a, b in zip(nodes, nodes[1:]):
                    edge = (a, b, 0)
                    if edge not in seen:
                        seen.add(edge)
                        edges.append(edge)
    return edges
14932530bdb1f5e0796d8c7f33449bd830d3b129
682,295
def find_faces_with_vertex(index, faces):
    """For a given vertex, find all faces containing this vertex.

    Note: faces do not have to be triangles.

    Parameters
    ----------
    index : integer
        index to a vertex
    faces : list of lists of three integers
        the integers for each face are indices to vertices, starting from zero

    Returns
    -------
    faces_with_vertex : list of lists of three integers
        the faces that contain the given vertex index

    Examples
    --------
    >>> faces = [[0,1,2],[0,2,3],[0,3,4],[0,1,4],[4,3,1]]
    >>> find_faces_with_vertex(3, faces)
    [[0, 2, 3], [0, 3, 4], [4, 3, 1]]
    """
    matches = []
    for face in faces:
        if index in face:
            matches.append(face)
    return matches
e48f203aa27a38f118d7f7135f9f67166f451e0e
682,297
def definitions_match(definition_objects, expected_descriptor):
    """Return whether Definition objects have expected properties.

    The order of the definitions, and their sources, may appear in any order.

    expected_descriptor is a shorthand format: a list/tuple of
    ``(definition_string, sources)`` where ``sources`` may be a single string
    for a single source, or a list of strings for multiple sources.
    """
    # Set comparison requires hashable members, so sources are frozen.
    def freeze_sources(sources):
        return frozenset(sources if isinstance(sources, list) else [sources])

    actual = {
        (definition.text,
         frozenset(citation.abbrv for citation in definition.citations.all()))
        for definition in definition_objects
    }
    expected = {
        (entry[0], freeze_sources(entry[1])) for entry in expected_descriptor
    }
    return actual == expected
0ed29ae0666b78a0b99bf8295b64cad5ec4bfcb9
682,298
from typing import Pattern import re import fnmatch def _glob_to_re(glob: str) -> Pattern[str]: """Translate and compile glob string into pattern.""" return re.compile(fnmatch.translate(glob))
663e78d54d8dd34a38f0919a190c2041db648e7e
682,301
def run_dir_name(run_num):
    """Return the formatted directory name ("runNNN") for a run number."""
    return f"run{run_num:03d}"
ae70f534b0c62912956ac81d2d35751b1d78d176
682,305
def _and(x,y): """Return: x and y (used for reduce)""" return x and y
7255241213594425c9f7efcf5b7edaec56000deb
682,306
def get_policy(observations, hparams):
    """Build a policy network.

    Args:
      observations: Tensor with observations.
      hparams: parameters holding the policy-network constructor and the
        environment spec.

    Returns:
      Tensor with policy and value function output.
    """
    build_network = hparams.policy_network
    action_space = hparams.environment_spec.action_space
    return build_network(action_space, hparams, observations)
b965235a3719eb946aff3a9877a8a12f7509b163
682,313
from typing import Counter


def column_checker(chat_df, message_df, attachment_df, handle_df):
    """Check the columns of the major tables for conflicting column names.

    :param chat_df: the chat table dataframe
    :param message_df: the message table dataframe
    :param attachment_df: the attachment table dataframe
    :param handle_df: the handle table dataframe
    :return: a list of column names appearing in more than one table
    """
    all_columns = []
    for frame in (chat_df, message_df, attachment_df, handle_df):
        all_columns.extend(frame.columns.tolist())
    counts = Counter(all_columns)
    return [name for name, count in counts.items() if count > 1]
2ed34b903aeab58d0868ad3e067bf05ce5ed7634
682,320
def _IsMacro(command): """Checks whether a command is a macro.""" if len(command) >= 4 and command[0] == "MACRO" and command[2] == "=": return True return False
cbe72b9c0c3cbe5dcc85ddb0834a59a7020fc510
682,321
import json
from collections import defaultdict


def read_config_filters(file_config):
    """Parse a JSON file to build the filtering configuration.

    Args:
        file_config (Path): absolute path of the configuration file

    Returns:
        combine (str): value of the "COMBINE" entry
        exclusion: value of the "EXCLUSION" entry
        filter_param (dict): {key: list of keywords} for every dict-valued
            entry whose 'mode' flag is truthy
    """
    filter_param = defaultdict(list)
    with open(file_config, "r") as config_file:
        config = json.load(config_file)
    combine = config["COMBINE"]
    exclusion = config["EXCLUSION"]
    for key, value in config.items():
        # Only dict-shaped entries with an enabled 'mode' contribute filters.
        if isinstance(value, dict) and value['mode']:
            filter_param[key] = value["list"]
    return combine, exclusion, filter_param
ed1b6dcad3cce6beaf3e7c5cf309afc1380b8f88
682,322
def negate_conf(c):
    """Negate a line of configuration by prefixing it with "no "."""
    return f"no {c}"
8c57b2a464c14f36fc21f7b7b0b52ba9f09b633a
682,328
def input_s(prompt: str = "", interrupt: str = "", eof: str = "logout") -> str:
    """Like Python's built-in ``input()``, but returns a string instead of
    raising when the user cancels (^C) or sends an end-of-file (^D on
    Unix-like systems, Ctrl-Z+Return on Windows).

    prompt
        Printed to standard output without a trailing newline before reading.
    interrupt
        Returned when a KeyboardInterrupt occurs.
    eof
        Returned when an EOFError occurs.

    Note: a trailing newline is printed even when KeyboardInterrupt or
    EOFError is raised.
    """
    try:
        return input(prompt)
    except (KeyboardInterrupt, EOFError) as exc:
        # Keep the cursor on a fresh line before returning the fallback.
        print()
        return interrupt if isinstance(exc, KeyboardInterrupt) else eof
13c648f3ed3c6c7e11c07d8bb0d42bce68fda24d
682,332
import re


def instruction_decoder_name(instruction):
    """Given an instruction with the format specified in
    ARMv7DecodingSpec.py, output a unique name that represents that
    particular instruction decoder."""
    # Replace all the characters that cannot appear in an identifier.
    name = re.sub(r'[\s\(\)\-\,\/\#]', '_', instruction["name"])
    # Append the encoding to keep the name unique per encoding.
    name = "{}_{}".format(name, instruction["encoding"])
    # Collapse double underscores left by adjacent replacements.
    name = name.replace("__", "_")
    return "decode_" + name.lower()
237c036ed844fab8f4185f850faa7da3c1e513cb
682,333
from typing import Optional
import requests


def get_def_tenant_id(sub_id: str) -> Optional[str]:
    """Get the tenant ID for a subscription.

    Parameters
    ----------
    sub_id : str
        Subscription ID

    Returns
    -------
    Optional[str]
        TenantID or None if it could not be found.

    Notes
    -----
    This function returns the tenant ID that owns the subscription. This may
    not be the correct ID to use if you are using delegated authorization
    via Azure Lighthouse.
    """
    url = ("https://management.azure.com/subscriptions/{subscriptionid}"
           "?api-version=2015-01-01").format(subscriptionid=sub_id)
    resp = requests.get(url)
    # The tenant ID is only exposed in the WWW-Authenticate header, inside
    # the "Bearer authorization_uri" entry.
    www_header = resp.headers.get("WWW-Authenticate")
    if not www_header:
        return None
    hdr_dict = {}
    for item in www_header.split(", "):
        parts = item.split("=")
        hdr_dict[parts[0]] = parts[1].strip('"')
    tenant_path = hdr_dict.get("Bearer authorization_uri", "").split("/")
    return tenant_path[-1] if tenant_path else None
2d21fb31bf71424b1fbb15771e4cfafe551871b5
682,335
import asyncio


async def subprocess_run_async(*args, shell=False):
    """Run a command asynchronously.

    If ``shell=True`` the command will be executed through the shell; in
    that case the argument must be a single string with the full command.
    Otherwise, must receive a list of program arguments.

    Returns the ``asyncio`` subprocess object after it has finished.
    NOTE(review): the original docstring said "the output of stdout", but
    the ``communicate()`` result is discarded below and the process object
    is returned instead — confirm which one callers expect.
    """
    if shell:
        # Single string command, run via the shell.
        cmd = await asyncio.create_subprocess_shell(
            args[0],
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    else:
        # List of program arguments, executed directly.
        cmd = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    # Wait for completion; the captured stdout/stderr data is not returned.
    await cmd.communicate()
    return cmd
c5d6d1e4f0a12062272ef45d230bd3f4e47180de
682,337
def djb2(L):
    """Bernstein's djb2 string hash.

    Equivalent to:

        h = 5381
        for c in L:
            h = ((h << 5) + h) + ord(c)  # h * 33 + ord(c)
        return h
    """
    h = 5381
    for c in L:
        # (h << 5) + h is h * 33.
        h = h * 33 + ord(c)
    return h
fef0331e6b7ca3b3f7b2cac097714cfcfc2b0a78
682,338
def create_user(ssh_fn, name):
    """Create a user on an instance using the ssh_fn and name.

    The ssh_fn is a function that takes a command and runs it on the remote
    system. It must be sudo capable so that a user can be created and the
    remote directory for the user be determined. The directory for the user
    is created in /var/lib/{name}.

    :param ssh_fn: a sudo capable ssh_fn that can run commands on the unit.
    :type ssh_fn: Callable[[str], str]
    :param name: the name of the user to create.
    :type name: str
    :returns: the directory of the new user.
    :rtype: str
    """
    home = f"/var/lib/{name}"
    # -r: system account, -s /bin/false: no login shell, -m: create home dir.
    ssh_fn(["sudo", "useradd", "-r", "-s", "/bin/false", "-d", home, "-m", name])
    return home.strip()
79fca1b6b49d46a231c9f7179b5e034f3722cbca
682,341
from random import shuffle


def randomize_ietimes(times, ids=None):
    """Randomize the times of the point events of all the given ids.

    This randomization keeps the starting time of each individual and
    reshuffles its own inter-event times.

    Parameters
    ----------
    times : dictionary of lists
        For each element, its times of events in a list.
    ids : list of ids, optional
        If None or empty, the reshuffling is global; otherwise only the
        given ids are reshuffled.

    Returns
    -------
    times_random : dictionary of lists
        For each element, a list of reshuffled event times.
    """
    # Fixed the mutable-default-argument pitfall of the original signature
    # (ids=[]); None and [] both mean "use every id", as before.
    if not ids:
        ids = times.keys()
    times_random = {}
    for idn in ids:
        events = times[idn]
        # Inter-event intervals, shuffled in place.
        intervals = [b - a for a, b in zip(events, events[1:])]
        shuffle(intervals)
        current = events[0]
        rebuilt = [current]
        for delta in intervals:
            current += delta
            rebuilt.append(current)
        times_random[idn] = rebuilt
    return times_random
29347060cf738ab82d8a6cfe24d22e5426c3dfa7
682,347
def write_csv_string(data):
    """Serialize a data object (created by one of the read_*_string
    functions) into a string in the CSV format.

    Note: values are joined with bare commas — values containing commas or
    quotes are not escaped.
    """
    pieces = []
    for row in data:
        pieces.append(",".join(str(value) for value in row.values()))
        pieces.append("\n")
    return "".join(pieces)
c26cc186dfd9232e5e83d56468d11481f0412500
682,348
def convert_list_to_string(input_list):
    """Convert a list to a string with each value separated with a new
    paragraph.

    Parameters
    ----------
    input_list : list
        List of values.

    Returns
    -------
    output : str
        String output with each value in `input_list` recorded.

    NOTE(review): the template literal below appears collapsed in this dump;
    the original triple-quoted string's exact newlines/indentation around
    ``{each}`` should be confirmed against the real source.
    """
    output = ''
    for each in input_list:
        # .format(**locals()) substitutes the loop variable ``each``.
        output += """ {each} """.format(**locals())
    return output
fa2749d7738418244c1d09309adf7cc39cd3fbfb
682,350
import pathlib
import hashlib


def get_hash(file_path):
    """Return the sha256 hex digest of ``file_path``.

    Missing or non-file paths hash as empty content.
    """
    path = pathlib.Path(file_path)
    content = path.read_bytes() if path.is_file() else b""
    return hashlib.sha256(content).hexdigest()
0afecdb34812dad7ad25509d9430235649a40e1f
682,351
def format_3(value):
    """Round a number to 3 digits after the dot."""
    return f"{value:.3f}"
4d27bc15afe130444126e2f3a1149d1726a292f3
682,357
def get_only_first_stacktrace(lines):
    """Get only the first stacktrace, because multiple stacktraces would make
    stacktrace parsing wrong.

    Leading empty lines are dropped; a '+----' separator seen after content
    has started ends the stacktrace.
    """
    collected = []
    for raw in lines:
        stripped = raw.rstrip()
        # A separator line after we have started collecting ends the trace.
        if stripped.startswith('+----') and collected:
            break
        # Skip the empty lines at the very beginning.
        if collected or stripped:
            collected.append(stripped)
    return collected
2b7c0e126bddbad3008f2825a829d5d59df3e1f7
682,358
def bande_est_noire(nv):
    """Determine whether a band is black or white from its band number:
    odd-numbered bands are black."""
    is_odd = (nv % 2 == 1)
    return is_odd
9fbde2bdc09d9f85f8aad0dd94fb6d06b8172627
682,360
def fmt(obj):
    """Value formatter: replace numpy types with base python types.

    Built-in types pass through unchanged; anything else is assumed to be a
    numpy object and converted via ``.item()`` (scalars) or ``.tolist()``
    (iterables).
    """
    if isinstance(obj, (str, int, float, complex, tuple, list, dict, set)):
        return obj
    try:
        # numpy scalar -> python scalar
        return obj.item()
    except (AttributeError, ValueError):
        # Narrowed the original bare except: .item() raises ValueError on
        # multi-element arrays and AttributeError on objects without it;
        # fall back to a (nested) list.
        return obj.tolist()
1f655ca2c8a862368c8e826006074175599e0c16
682,364
def get_first_aligned_bp_index(alignment_seq):
    """Given an alignment string, return the index of the first aligned,
    i.e. non-gap position (0-indexed!).

    Args:
        alignment_seq (string): String of aligned sequence, consisting of
            gaps ('-') and non-gap characters, such as "HA-LO" or "----ALO".

    Returns:
        Integer, >= 0, indicating the first non-gap character within
        alignment_seq.

    Raises:
        IndexError: if the sequence contains only gaps (same exception type
            as the original list-indexing implementation).
    """
    # Stop at the first hit instead of materializing every non-gap index.
    for i, bp in enumerate(alignment_seq):
        if bp != '-':
            return i
    raise IndexError("list index out of range")
d16331cb0cf6e94cfcb8aa04730520aeec915480
682,367
def risingfactorial(n, m):
    """Return the rising factorial; n to the m rising, i.e. n(n+1)..(n+m-1).

    For example:

    >>> risingfactorial(7, 3)
    504
    """
    result = 1
    factor = n
    while factor < n + m:
        result *= factor
        factor += 1
    return result
7fef55ede604d8b5f576608c505edae56822857d
682,369
def get_adoc_title(title: str, level: int) -> str:
    """Return an asciidoc title line for ``title`` at the given ``level``
    (one '=' per level)."""
    return f"{'=' * level} {title} \n"
6d59042961df96d4d62ef9228c93a0023f67786d
682,370
def _normalize_typos(typos, replacement_rules): """ Applies all character replacement rules to the typos and returns a new dictionary of typos of all non-empty elements from normalized 'typos'. """ if len(replacement_rules) > 0: typos_new = dict() for key, values in typos.items(): typos_new[key] = list() for item in values: for orig, replacement in replacement_rules: item = item.replace(orig, replacement) item = item.strip() if item: typos_new[key].append(item) return typos_new else: return typos
fc47995303b00bc4d612a6a161dfad4c0bcd8e02
682,371
import csv


def create_idf_dict() -> dict:
    """Return the {term: idf} dict loaded from
    climate_keywords/tstar_idf.txt."""
    idf_dict = {}
    with open("climate_keywords/tstar_idf.txt", "r", encoding='utf-8',
              errors='ignore') as f:
        for term, idf in csv.reader(f):
            idf_dict[term] = float(idf)
    return idf_dict
57a7d207d27159767de775720c520db09790a83c
682,372
import re


def sanitize_json_string(string):
    """Clean up extraneous whitespace from the provided string so it may be
    written to a JSON file. Extraneous whitespace includes any before or
    after the provided string, as well as between words.

    Parameters
    ----------
    string : str
        The string to sanitize.

    Returns
    -------
    string : str
        The provided string, sanitized of extraneous whitespace.
    """
    # Collapse every whitespace run (including line breaks) to one space,
    # then trim the ends.
    collapsed = re.sub(r"\s+", " ", string, flags=re.UNICODE)
    return collapsed.strip()
1c62bfc176674f378de3ecc42db629b8b71ccf7b
682,374
def dict_slice(dict_input, start, end) -> dict:
    """Take a slice of a python dict by insertion-order position.

    :param dict_input: a dict for slicing
    :param start: start position for slicing
    :param end: end position (exclusive) for slicing
    :return: the sliced dict
    """
    # Bug fix: dict.keys() views are not subscriptable in Python 3 (the
    # original raised TypeError); slice a list of the keys instead.
    sliced_keys = list(dict_input)[start:end]
    return {key: dict_input[key] for key in sliced_keys}
ec4bb4970074b11de21c686afd1ac9bd28460430
682,384
def extract_barcode(read, plen):
    """Extract barcode from Seq and Phred quality.

    :param read: A SeqIO object.
    :type read: object
    :param plen: The length of the barcode.
    :type plen: num
    :returns: A SeqIO object with barcode removed and a barcode string.
    """
    prefix = read.seq[:plen]
    trimmed = read[plen:]
    return trimmed, str(prefix)
d6087a32c2c49a71338a32e4f69a7cf064b33894
682,390
def getGroupInputDataLength(hg):
    """Return the length of a HDF5 group.

    Parameters
    ----------
    hg : `h5py.Group` or `h5py.File`
        The input data group

    Returns
    -------
    length : `int`
        The length of the data

    Raises
    ------
    ValueError
        If the child datasets do not all have the same length.

    Notes
    -----
    For a multi-D array this returns the length of the first axis and not
    the total size of the array. Normally that is what you want to be
    iterating over.

    The group is meant to represent a table, hence all child datasets
    should be the same length.
    """
    firstkey = list(hg.keys())[0]
    nrows = len(hg[firstkey])
    firstname = hg[firstkey].name
    for value in hg.values():
        if len(value) != nrows:
            # Fixed typo in the original message ("not not match").
            raise ValueError(
                f"Group does not represent a table. Length ({len(value)}) "
                f"of column {value.name} does not match length ({nrows}) "
                f"of first column {firstname}")
    return nrows
f049faff66573b9dfeec304f33143ca915a692c6
682,397
from typing import Dict
import re


def get_current_zones(filename: str) -> Dict[str, str]:
    """Get dictionary of current zones and patterns.

    Lines look like ``add <zone> <pattern>``; comment lines starting with
    '#' are skipped. A missing file yields an empty dict.
    """
    zones: Dict[str, str] = {}
    try:
        # Close the file deterministically (the original leaked the handle
        # by calling open(...).readlines() without closing).
        with open(filename) as fh:
            for line in fh:
                if line.startswith("#"):
                    continue
                if match := re.match(r"^add (\S+) (\w+)$", line.rstrip()):
                    zones[match.group(1).lower()] = match.group(2)
    except FileNotFoundError:
        pass
    return zones
5b87653ac19c76e023bf33eb0c81fa2c3b92009c
682,399
def std_ver_minor_uninst_valueerr_iativer(request):
    """Return a string that looks like it could be a valid IATIver minor
    version number, but is not (parametrized fixture pass-through)."""
    return getattr(request, "param")
5d902381b22d03a52c2cbee6d13b178beda9c97a
682,406
def sec_url(period):
    """Create the url link to a SEC Financial Statement Data Set."""
    base = "https://www.sec.gov/files/dera/data/financial-statement-data-sets/"
    # Handle the weird path exception the SEC uses for 2020q1.
    if period == "2020q1":
        base = "https://www.sec.gov/files/node/add/data_distribution/"
    return base + period + ".zip"
8d38a8dca62a7fd23a04130bd37aed1ed9ae34a0
682,411
def compute_sub(guard_str):
    """Given a guard, return its parenthesized sub-guards.

    Arithmetic groups (those starting with '(+', '(-' or '(*') are skipped.
    """
    open_positions = []
    subs = []
    for pos, ch in enumerate(guard_str):
        if ch == '(':
            open_positions.append(pos)
        elif ch == ')':
            begin = open_positions.pop()
            candidate = guard_str[begin:pos + 1].strip()
            if not candidate.startswith(('(+', '(-', '(*')):
                subs.append(candidate)
    return subs
42e75bd56c0a1cf9a6c8ff8bb92978e423df2314
682,417
def clean_string(s):
    """Get a string into a canonical form - no whitespace at either end,
    no newlines, no double-spaces.

    Note: double spaces are collapsed in a single pass, so runs of three or
    more spaces can leave residual doubles (matches the original behavior).
    """
    canonical = s.strip()
    canonical = canonical.replace("\n", " ")
    return canonical.replace("  ", " ")
d0de7c4d7d398152d23a03f90e86c592e2a4e6ea
682,418
import math


def precision_digits(f, width):
    """Return number of digits after decimal point to print f in width chars.

    Examples
    --------
    >>> precision_digits(-0.12345678, 5)
    2
    >>> precision_digits(1.23456789, 5)
    3
    >>> precision_digits(12.3456789, 5)
    2
    >>> precision_digits(12345.6789, 5)
    1
    """
    # Base-10 magnitude of f; 0 is treated like a value in [0, 1), which
    # also avoids the original ValueError from math.log(0).
    if f == 0:
        magnitude = 0
    else:
        magnitude = math.log(abs(f), 10)
        if magnitude < 0:
            magnitude = 0
    precision = width - int(math.floor(magnitude))
    # Reserve one char for the decimal point, plus one for a minus sign.
    precision -= 3 if f < 0 else 2
    if precision < 1:
        precision = 1
    return precision
473fa791e1d9dc07486250b2b9072c264aa365bc
682,421
def sorted_nodes_by_name(nodes):
    """Return a new list of Nodes sorted by their ``name`` attribute."""
    def by_name(node):
        return node.name
    return sorted(nodes, key=by_name)
b73aef773b59204af5ee00f1dc95aab8b5d0b3ca
682,423
def ai(vp, rho):
    """Compute the acoustic impedance.

    Parameters
    ----------
    vp : array
        P-velocity.
    rho : array
        Density.

    Returns
    -------
    ai : array
        Acoustic impedance (vp * rho).
    """
    impedance = vp * rho
    return impedance
4879c0095d64ad04911b264524e6a95c277c128a
682,426
def get_bool(bytearray_: bytearray, byte_index: int, bool_index: int) -> bool:
    """Get the boolean value from a bit location in a bytearray.

    Args:
        bytearray_: buffer data.
        byte_index: byte index to read from.
        bool_index: bit index to read from (bit 0 is the least significant).

    Returns:
        True if the bit is 1, else False.

    Examples:
        >>> buffer = bytearray([0b00000001])  # Only one byte length
        >>> get_bool(buffer, 0, 0)  # The bit 0 starts at the right.
        True
    """
    mask = 1 << bool_index
    return (bytearray_[byte_index] & mask) == mask
34d7a032b90ffaa7eb85e88bd8a57ec5db54a22b
682,430
from typing import Callable


def evaluate(population: list, evaluation_fn: Callable) -> list:
    """Evaluate the given population using the given evaluation function.

    Params:
    - population (list<str>): The population of chromosomes to evaluate
    - evaluation_fn (Callable): The evaluation function to use

    Returns:
    - evaluated_population (list<tuple<str, Any>>): The evaluated chromosomes
      paired with their scores
    """
    scored = []
    for chromosome in population:
        scored.append((chromosome, evaluation_fn(chromosome)))
    return scored
a839524618b6871008da4f8c844b2fe123cadffa
682,433
import copy


def merge_to_panoptic(detection_dicts, sem_seg_dicts):
    """Create dataset dicts for panoptic segmentation, by merging two dicts
    using the "file_name" field to match their entries.

    Args:
        detection_dicts (list[dict]): lists of dicts for object detection or
            instance segmentation.
        sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.

    Returns:
        list[dict] (one per input image): Each dict contains all (key, value)
        pairs from dicts in both detection_dicts and sem_seg_dicts that
        correspond to the same image. The function assumes that the same key
        in different dicts has the same value.
    """
    by_file = {entry["file_name"]: entry for entry in sem_seg_dicts}
    assert len(by_file) > 0
    merged = []
    for det in detection_dicts:
        # Shallow-copy so the input detection dicts stay untouched.
        combined = copy.copy(det)
        combined.update(by_file[combined["file_name"]])
        merged.append(combined)
    return merged
e28916b97bf1955f06a6e24208dd6d47422e23c4
682,440
def intersection_pt(L1, L2):
    """Return the intersection point (x, y) of two lines given as
    (a, b, c) coefficient triples, or False for parallel lines."""
    det = L1[0] * L2[1] - L1[1] * L2[0]
    if det == 0:
        # Parallel (or coincident) lines: keep the original False sentinel.
        return False
    det_x = L1[2] * L2[1] - L1[1] * L2[2]
    det_y = L1[0] * L2[2] - L1[2] * L2[0]
    return det_x / det, det_y / det
1ded12cf1e876b468cea6960f8069a48c76ea9ff
682,444
def has_cli_method(script_path):
    """Check if a script has a cli() method in order to add it to the main.

    :param script_path: path to a python script inside Cider packages
    :return: Boolean
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle from open(...).read()).
    with open(script_path, 'r') as script:
        return "cli()" in script.read()
9afb9a7db73a671dc21c2bc3a23a2b3d6d414ecb
682,446
def lobid_qs(row, q_field='surname', add_fields=[],
             base_url="https://lobid.org/gnd/search?q="):
    """Create a lobid query string from the passed-in fields.

    :param row: mapping of field name -> value
    :param q_field: the field used for the main query term
    :param add_fields: extra field names whose values are ANDed onto the query
    :param base_url: lobid search endpoint prefix
    :return: the assembled search url
    """
    search_url = base_url + row[q_field] + "&filter=type:Person"
    if add_fields:
        # Falsy field names are skipped, as before.
        extra_terms = [row[field] for field in add_fields if field]
        # Bug fix: join with " AND " — the original used "AND ".join(...),
        # which produced e.g. "fooAND bar" between consecutive terms.
        search_url = "{} AND {}".format(search_url, " AND ".join(extra_terms))
    return search_url
0a69bea98eb2a67b28fd78206ec6f79b1a14719e
682,447
def jaccard(ground, found):
    """Jaccard index: cardinality of set intersection / cardinality of set
    union."""
    ground_set = set(ground)
    found_set = set(found)
    overlap = len(ground_set & found_set)
    total = len(ground_set | found_set)
    return overlap / float(total)
6e3068fd6f2d2d8aec8aede7d7f7c1dd8ad4fcdf
682,448
from typing import Dict
import logging


def collect_content_packs_to_install(id_set: Dict, integration_ids: set,
                                     playbook_names: set,
                                     script_names: set) -> set:
    """Iterate all content entities in the ID set and extract the pack names
    for the modified ones.

    Args:
        id_set (Dict): Structure which holds all content entities to extract
            pack names from.
        integration_ids (set): Set of integration IDs to get pack names for.
        playbook_names (set): Set of playbook names to get pack names for.
        script_names (set): Set of script names to get pack names for.

    Returns:
        set. Pack names to install.
    """
    packs_to_install = set()

    def _collect(entity_object, entity_kind, entity_name):
        """Add the entity's pack (if any) to packs_to_install, with logging.

        De-duplicates the three copy-pasted log/add blocks of the original
        and avoids the double .get('pack') lookup.
        """
        pack = entity_object.get('pack')
        if pack:
            logging.info(f'Found {entity_kind} {entity_name} in pack {pack} '
                         f'- adding to packs to install')
            packs_to_install.add(pack)
        else:
            logging.warning(f'Found {entity_kind} {entity_name} without pack '
                            f'- not adding to packs to install')

    for integration in id_set.get('integrations', []):
        integration_id = list(integration.keys())[0]
        if integration_id in integration_ids:
            _collect(integration[integration_id], 'integration', integration_id)

    for playbook in id_set.get('playbooks', []):
        playbook_object = list(playbook.values())[0]
        playbook_name = playbook_object.get('name')
        if playbook_name in playbook_names:
            _collect(playbook_object, 'playbook', playbook_name)

    for script in id_set.get('scripts', []):
        script_id = list(script.keys())[0]
        if script_id in script_names:
            _collect(script[script_id], 'script', script_id)

    return packs_to_install
d31901a0d44aa676389f8f1fe313066993e988a7
682,453
def maximum_severity(*alarms):
    """Get the alarm with maximum severity (or first if items have equal
    severity).

    Args:
        *alarms (Tuple[AlarmSeverity, AlarmStatus]): alarms to choose from

    Returns:
        (Optional[Tuple[AlarmSeverity, AlarmStatus]]) alarm with maximum
        severity; None for no arguments
    """
    if not alarms:
        return None
    # max() returns the first maximal element, matching the original's
    # strict '>' tie-breaking.
    return max(alarms, key=lambda alarm: alarm[0])
d78a221b56e891103e8387d077d8f14dd2d1ce93
682,455
def extractRoi(frame, pos, dia):
    """Extract a region of interest with size dia x dia in the provided
    frame, at the specified position (clipped to the frame borders).

    Input:
        frame: Numpy array containing the frame
        pos: 2D position of center of ROI
        dia: Integer used as width and height of the ROI

    Output:
        patch: Numpy array containing the extracted ROI
    """
    height, width = frame.shape[:2]
    half = dia / 2
    x0 = max(int(pos[0] - half) + 1, 0)
    y0 = max(int(pos[1] - half) + 1, 0)
    x1 = min(x0 + dia, width)
    y1 = min(y0 + dia, height)
    return frame[y0:y1, x0:x1]
0c66811c9f564477e0075a52deaa5d242ffcba22
682,457
import secrets
import click


def generate_secret_key(num_bytes, show=True):
    """Generate (and optionally print) a random url-safe string of
    ``num_bytes`` bytes of entropy."""
    token = secrets.token_urlsafe(num_bytes)
    if show:
        click.secho(token)
    return token
917d7a4e9be93b7c8266f53f07f3f3e1cec43865
682,458
def npy_cdouble_from_double_complex(var):
    """Cast a Cython double complex to a NumPy cdouble (emits the generated
    code fragment as a string)."""
    return f"_complexstuff.npy_cdouble_from_double_complex({var})"
ee9a6c3803a2f572e3dfb5a3e7c822136e630d1e
682,475
def find_highest_degree(graph):
    """Find the highest degree in a graph.

    NOTE(review): assumes ``graph.degree()`` returns a mapping of
    node -> degree (networkx 1.x style) — confirm against the graph library
    in use.
    """
    degrees = graph.degree()
    highest = 0
    for node in degrees:
        degree = degrees[node]
        if degree > highest:
            highest = degree
    return highest
8a311280e8ac61972fa9dcd05dab3ead98456720
682,480
import functools
import warnings
from typing import Any, Callable


def deprecated(func: Callable) -> Callable:
    """Decorator that marks a function as deprecated, emitting a
    DeprecationWarning every time the function is called.

    Arguments:
        func: The function to be decorated.

    Returns:
        The decorated function, which emits a warning when used.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs) -> Any:
        """Wrapped function returned by the decorator.

        Returns:
            The original function evaluation.
        """
        # catch_warnings() snapshots and restores the global filter state;
        # the previous simplefilter("default") reset clobbered any filters
        # the application had configured.
        with warnings.catch_warnings():
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn(
                "Deprecated function {} invoked".format(func.__name__),
                category=DeprecationWarning,
                stacklevel=2,
            )
        return func(*args, **kwargs)

    return wrapper
747df84c3b3b6e8bf8f24fa1e8244fd45d781d6f
682,484
def printtable(table, moe=False):
    """Pretty-print information on a Census table (such as produced by
    `censustable`).

    Args:
        table (OrderedDict): Table information from censustable.
        moe (bool, optional): Display margins of error.

    Returns:
        None.

    Examples::

        censusdata.printtable(censusdata.censustable('acs5', 2015, 'B19013'))
    """
    divider = '-' * 115
    print('{0:12} | {1:30.30} | {2:56} | {3:5}'.format('Variable', 'Table', 'Label', 'Type'))
    print(divider)
    for variable, info in table.items():
        # Margin-of-error variables end in 'M'; hide them unless requested.
        if not moe and variable[-1] == 'M':
            continue
        # Indent the label by its '!!' nesting depth, then flatten the
        # remaining '!!' separators into spaces.
        label = info['label']
        label = '!! ' * label.count('!!') + label.replace('!!', ' ')
        print('{0:12} | {1:30.30} | {2:56.56} | {3:5}'.format(
            variable, info['concept'], label, info['predicateType']))
    print(divider)
    return None
346935c49c9b6e213914ed7f85cee05d01361c9a
682,485
def _juniper_vrf_default_mapping(vrf_name: str) -> str: """ This function will convert Juniper global/default/master routing instance => master => default :param vrf_name: :return str: "default" if vrf_name = "master" else vrf_name """ if vrf_name == "master": return "default" else: return vrf_name
30028de90d0bb04824dda33f41ebca8bd2b17dba
682,486
def nz(df, y=0):
    """
    Replace NaN values in a series/frame with *y* (zero by default).
    Mirrors the nz() function of Pinescript.
    """
    return df.fillna(value=y)
5d0b940b88d7c8dfb7109d5e1a2538784231db1c
682,489
def placemark_name(lst):
    """Format a placemark name, appending its state unless it is complete.

    lst is a [name, state] pair; a 'Complete' state is omitted from the
    rendered name.
    """
    name, state = lst[0], lst[1]
    return name if state == 'Complete' else ': '.join(lst)
aeee6bbbf909b82f241c45fd3bbfe149d87c2dc1
682,490
def calc_suffix(_str, n):
    """
    Return *_str* unchanged when it fits in n characters, otherwise an
    n-character string of the form '...suffix' (3 chars of ellipsis plus
    the last n-3 characters).
    """
    return _str if len(_str) <= n else '...' + _str[3 - n:]
67ff72f0ed1f3f1f6a06c10089293c8a61e1cf75
682,495
from typing import List


def datum_is_sum(datum: int, preamble: List[int]) -> bool:
    """Decide whether *datum* is the sum of two numbers from *preamble*.

    Args:
        datum (int): a number that should be a sum of any two numbers in
            preamble
        preamble (List[int]): the preceding window of numbers to search

    Returns:
        bool: True if the datum is such a sum; False otherwise
    """
    for addend in preamble:
        complement = datum - addend
        # An addend may not be paired with itself to form the sum.
        if complement != addend and complement in preamble:
            return True
    return False
e5024bb0adbada3ba07a949505db8a31c0170958
682,497
def filter_data(data, var_val_pairs):
    """
    Subset a dataframe by successive equality filters.

    Args:
        data (df): a dataframe
        var_val_pairs (dict): keys are column names, values are the
            required value for each column
    """
    subset = data.copy()
    for column, required in var_val_pairs.items():
        subset = subset.loc[subset[column] == required]
    # Re-number rows from 0 so the result looks like a fresh frame.
    return subset.reset_index(drop=True)
66e2dcd66ffa7221795ac598a767648fdfeefcf6
682,499
def getFirstMatching(values, matches):
    """
    Return the first element of :py:obj:`values` that is also present in
    :py:obj:`matches`, or None when values is None/empty or nothing matches.

    :type values: collections.abc.Iterable
    :param values: list of items to look through, can be None
    :type matches: collections.abc.Container
    :param matches: list of items to check against
    """
    assert matches is not None
    if not values:
        return None
    for candidate in values:
        if candidate in matches:
            return candidate
    return None
7b11d769ba9a95bfd586fdac67db6b6e9a0cebc1
682,501
from typing import List
from typing import Tuple


def compute_padding(kernel_size: Tuple[int, int]) -> List[int]:
    """Compute the 4-int padding tuple for a 2-D kernel.

    Ordering matches torch.nn.functional.pad:
    (padding_left, padding_right, padding_top, padding_bottom).
    https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad
    """
    assert len(kernel_size) == 2, kernel_size
    pad_vertical, pad_horizontal = ((k - 1) // 2 for k in kernel_size)
    return [pad_horizontal, pad_horizontal, pad_vertical, pad_vertical]
a0e50ac4d2eb77f99bae117f9357cc6cefeb78c9
682,503
def _calculate_temperature(c, h): """ Compute the temperature give a speed of sound ``c`` and humidity ``h`` """ return (c - 331.4 - 0.0124 * h) / 0.6
08555185c563bc5d67c4408e15253994b71ad703
682,506
def get_region(df, region):
    """
    Extract the rows of a regional dataset belonging to one region,
    matched on the 'denominazione_regione' column.
    """
    mask = df.denominazione_regione == region
    return df[mask]
08a78778bc9b2e3e4eabeb846ea88ee7a67f67a8
682,508
def get_installed_tool_shed_repository( trans, id ):
    """Fetch a ToolShedRepository record from the Galaxy database by its
    encoded id."""
    decoded_id = trans.security.decode_id( id )
    repository_query = trans.sa_session.query( trans.model.ToolShedRepository )
    return repository_query.get( decoded_id )
aeda7ad13a0c2dc490a93b5240bd4dd3c6aa283a
682,511
def _label_group(key, pattern): """ Build the right pattern for matching with named entities. >>> _label_group('key', '[0-9]{4}') '(?P<key>[0-9]{4})' """ return '(?P<%s>%s)' % (key, pattern)
db244f37d968e8cda5d2525b1ba56a009766fb03
682,513
def _to_set(loc): """Convert an array of locations into a set of tuple locations.""" return set(map(tuple, loc))
57623e0063df65eb51b46a3dbb01915528e60090
682,514
def is_valid_port(entry, allow_zero = False):
    """
    Checks if a string or int is a valid port number.

    :param list,str,int entry: string, integer or list to be checked
    :param bool allow_zero: accept port number of zero (reserved by definition)

    :returns: **True** if input is an integer and within the valid port range,
      **False** otherwise
    """
    # A sequence is valid only when every member is a valid port
    # (an empty sequence is vacuously valid, as before).
    if isinstance(entry, (tuple, list)):
        return all(is_valid_port(port, allow_zero) for port in entry)

    try:
        value = int(entry)
    except (TypeError, ValueError):
        return False

    # Reject inputs whose text form differs from the parsed number,
    # e.g. leading space or leading zero.
    if str(value) != str(entry):
        return False
    if value == 0:
        return allow_zero
    return 0 < value < 65536
4a267120f639fd91d1554f9038b323fa028201c0
682,515
from typing import Any


def hide_if_not_none(secret: Any):
    """
    Mask a secret unless it is None (meaning the user didn't set it).

    :param secret: the secret.
    :return: None when unset, the string 'hidden' otherwise.
    """
    return None if secret is None else 'hidden'
a2d242e764066916a9427609ab0c949a9237e06a
682,516
def percentIncrease(old, new):
    """
    Calculate the fractional increase from *old* to *new*.

    Parameters
    ----------
    old : numeric
        The old number (must be non-zero, otherwise ZeroDivisionError).
    new : numeric
        The new number.

    Returns
    -------
    numeric
        The percent increase expressed as a decimal fraction.
    """
    delta = new - old
    return delta / old
1b0636fa52ef6691c2f7066db6df5f40cf52dc64
682,517
from typing import OrderedDict


def create_model(api, name, fields):
    """Create an api model whose fields keep their given order.

    :param flask_restx.Api api: Flask-RESTX Api object
    :param str name: Model name
    :param list fields: List of tuples containing ['field name', <type>]
    """
    ordered_fields = OrderedDict((field[0], field[1]) for field in fields)
    return api.model(name, ordered_fields)
9119e43e07ba3c7baeb176b0f7ba285f64c8c8da
682,519
import torch


def rgb_to_srgb(image):
    """Gamma-correct linear RGB values into sRGB.

    Assumes the input is in [0, 1]; works for batched images too.

    :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in
                  which the channels are linearized R, G, B
    :return: A pytorch tensor of the same shape with gamma-corrected RGB
    """
    # Piecewise sRGB transfer function: linear segment near zero,
    # power-law segment elsewhere.
    linear_part = image * 12.92
    gamma_part = image.pow(.416) * 1.055 - 0.055
    return torch.where(image <= 0.0031308, linear_part, gamma_part)
60d4e199152fba9829076722816f6e8efa3c659d
682,521
def confirmed_password_valid(password, confirmation):
    """
    Check that a password confirmation matches the original entry.

    Args:
        password: the password to check.
        confirmation: the confirmation of the password.

    Returns:
        True when both strings are identical (no typos), False otherwise.
    """
    matches = password == confirmation
    return matches
2404ee5b1d0c75cc9ddab4155fd10efad7ab0b70
682,522
def contains(element):
    """
    Build a predicate that checks whether its argument contains *element*.

    :param element: The element to check against.
    :return: A predicate returning True when ``element in argument`` holds;
             arguments that do not support membership tests yield False.
    """
    def predicate(argument):
        try:
            return element in argument
        except TypeError:
            # `in` raises TypeError for non-containers (e.g. ints); report
            # "not contained" instead of propagating.  Previously a bare
            # except also swallowed KeyboardInterrupt and programming errors.
            return False
    return predicate
901785df69b8ee3f07c01cc1d3a1ce1c31d204b2
682,523
def split_bbox(bbox):
    """Split a bounding box into its parts.

    :param bbox: String describing a bbox e.g. '106.78674459457397,
        -6.141301491467023,106.80691480636597,-6.133834354201348'
    :type bbox: str

    :returns: A dict with keys: 'SW_lng, SW_lat, NE_lng, NE_lat'
    :rtype: dict

    :raises ValueError: if the string does not contain exactly four
        comma-separated values, or a value is not a valid float.
    """
    parts = bbox.split(',')
    if len(parts) != 4:
        raise ValueError('Invalid bbox')
    names = ['SW_lng', 'SW_lat', 'NE_lng', 'NE_lat']
    # float() raises ValueError itself for malformed coordinate text.
    return dict(zip(names, (float(value) for value in parts)))
7ee3a466eac84235d1897bc2996afcbe243f64d9
682,526
def compare_rgb_colors_tolerance(first_rgb_color, second_rgb_color, tolerance):
    """
    Compare two RGB colors channel by channel within a tolerance
    (margin for error).

    :param first_rgb_color: tuple(float, float, float), first color to compare
    :param second_rgb_color: tuple(float, float, float), second color to compare
    :param tolerance: float, maximum allowed per-channel difference
    :return: bool, True when every channel pair differs by at most the
        tolerance; False otherwise
    """
    return all(
        abs(first_rgb_color[channel] - second_rgb_color[channel]) <= tolerance
        for channel in range(len(first_rgb_color))
    )
c1c912eaabe52fbb581ca6c3c6d0769b55a7e470
682,528
from pathlib import Path


def GetRendererLabelFromFilename(file_path: str) -> str:
    """Gets the renderer label from the given file name by removing the
    '_renderer' suffix (the '.py' extension is already dropped by .stem)."""
    file_name = Path(file_path).stem
    # str.rstrip("_renderer.py") stripped a *character set*, mangling names
    # such as "my_array_renderer" -> "my_arra"; remove the exact suffix.
    suffix = "_renderer"
    if file_name.endswith(suffix):
        return file_name[:-len(suffix)]
    return file_name
11e97b9712046840103b7bb5910f2b82109f0545
682,529
def _sample_generator(*data_buffers): """ Takes a list of many mono audio data buffers and makes a sample generator of interleaved audio samples, one sample from each channel. The resulting generator can be used to build a multichannel audio buffer. >>> gen = _sample_generator("abcd", "ABCD") >>> list(gen) ["a", "A", "b", "B", "c", "C", "d", "D"] """ frame_gen = zip(*data_buffers) return (sample for frame in frame_gen for sample in frame)
4688d9abd62d88e08563625a5f53c5088e4c9bcd
682,530
def get_novel_smiles(new_unique_smiles, reference_unique_smiles):
    """Return the SMILES strings that do not appear in the reference set.

    Parameters
    ----------
    new_unique_smiles : list of str
        SMILES from which we want to identify novel ones
    reference_unique_smiles : list of str
        Reference SMILES we already have
    """
    return set(new_unique_smiles) - set(reference_unique_smiles)
2316e4c4bc41a8f68a2c2ff636f6a99222154e98
682,531
def error(message, code=400):
    """Build an (payload, status-code) error response tuple."""
    payload = {"error": message}
    return payload, code
11214c1cfa7052aefa91ae532b15548331d153cb
682,532
import warnings def _validate_buckets(categorical, k, scheme): """ This method validates that the hue parameter is correctly specified. Valid inputs are: 1. Both k and scheme are specified. In that case the user wants us to handle binning the data into k buckets ourselves, using the stated algorithm. We issue a warning if the specified k is greater than 10. 2. k is left unspecified and scheme is specified. In that case the user wants us to handle binning the data into some default (k=5) number of buckets, using the stated algorithm. 3. Both k and scheme are left unspecified. In that case the user wants us bucket the data variable using some default algorithm (Quantiles) into some default number of buckets (5). 4. k is specified, but scheme is not. We choose to interpret this as meaning that the user wants us to handle bucketing the data into k buckets using the default (Quantiles) bucketing algorithm. 5. categorical is True, and both k and scheme are False or left unspecified. In that case we do categorical. Invalid inputs are: 6. categorical is True, and one of k or scheme are also specified. In this case we raise a ValueError as this input makes no sense. Parameters ---------- categorical : boolean Whether or not the data values given in ``hue`` are already a categorical variable. k : int The number of categories to use. This variable has no effect if ``categorical`` is True, and will be set to 5 by default if it is False and not already given. scheme : str The PySAL scheme that the variable will be categorized according to (or rather, a string representation thereof). Returns ------- (categorical, k, scheme) : tuple A possibly modified input tuple meant for reassignment in place. """ if categorical and (k != 5 or scheme): raise ValueError("Invalid input: categorical cannot be specified as True simultaneously with scheme or k " "parameters") if k > 10: warnings.warn("Generating a choropleth using a categorical column with over 10 individual categories. 
" "This is not recommended!") if not scheme: scheme = 'Quantiles' # This trips it correctly later. return categorical, k, scheme
2953c08d85705728d8f269db67772e420ee85f87
682,534
import json


def save_json(f, cfg):
    """Write *cfg* to the file path *f* as JSON (best-effort).

    Args:
        f: destination file path.
        cfg: JSON-serializable object to save.

    Returns:
        True on success; False when the file cannot be written or *cfg*
        is not JSON-serializable.  Never raises.
    """
    try:
        with open(f, 'w') as configfile:
            json.dump(cfg, configfile)
    # Narrowed from a bare except: OSError covers file-system failures,
    # TypeError/ValueError cover non-serializable or invalid values.
    except (OSError, TypeError, ValueError):
        return False
    return True
75c232000962a4edbad5131b11f2701157f32d76
682,537
import math


def parallelepiped_magnet(
    length=0.01, width=0.006, thickness=0.001, density=7500
):
    """
    Compute the volume, mass and moment of inertia (about the z axis) of a
    parallelepiped magnet.  International system units; the hole for the
    axis is ignored.
    """
    volume = length * width * thickness
    mass = volume * density
    # Rectangular-plate inertia about z: m * (L^2 + W^2) / 12.
    moment_z = mass * (math.pow(length, 2) + math.pow(width, 2)) / 12
    return {"V": volume, "m": mass, "mom_z": moment_z}
56ddf74e437e3cc60a9eb11e291fe93ccaa58fd2
682,538
def max_cw(l):
    """Return the largest value of a list.

    Raises:
        ValueError: if *l* is empty (the previous sort-based implementation
            raised IndexError in that case).
    """
    # max() is a single O(n) pass; sorting the whole list only to take
    # its last element was O(n log n).
    return max(l)
21b079b5c3dd4cb7aba55588d38ba57a058bbb97
682,539
def eia_mer_url_helper(build_url, config, args):
    """Build URLs for the EIA_MER dataset.

    The critical parameter is 'tbl', representing a table from the dataset;
    the '__tbl__' placeholder in *build_url* is substituted once per table
    listed in config['tbls'].
    """
    return [build_url.replace("__tbl__", tbl) for tbl in config['tbls']]
6d753c6c191ce27e52749407b575203452183d4c
682,543
def safe_index(l, e):
    """Get the index of *e* in *l*, returning len(l) when *e* is absent."""
    try:
        return l.index(e)
    # list.index raises ValueError when the element is missing; the
    # previous bare except also hid unrelated programming errors.
    except ValueError:
        return len(l)
223d89964888293586726f29978e195549c8c134
682,546
import json


def load_json(path_to_json: str) -> dict:
    """Load json with information about train and test sets.

    Args:
        path_to_json: path of the UTF-8 encoded JSON file to read.

    Returns:
        The deserialized JSON content.
    """
    # Built-in open() with an explicit encoding replaces the legacy
    # codecs.open + json.loads(read()) pattern; json.load streams the file.
    with open(path_to_json, encoding='utf-8') as train_test_info:
        return json.load(train_test_info)
729cbfb714594e00d6d8ceb786ea8f55e094905b
682,547
def _and(arg1, arg2): """Boolean and""" return arg1 and arg2
3456e68c2d06dc212ddff43869bb760b85245729
682,550
def get_repositories(reviewboard):
    """Return the registered repositories whose tool is Mercurial."""
    all_repositories = reviewboard.repositories()
    return list(filter(lambda repo: repo.tool == 'Mercurial', all_repositories))
520235347e3cb32bcb190d69f5038feaa966753f
682,555
import grp


def check_gid(gid):
    """
    Resolve *gid* to a numerical group id.

    Numeric input is returned unchanged; a digit string is converted to
    int; any other string is looked up as a group name.

    Raises:
        KeyError: Unknown group name.
    """
    try:
        # Numeric input (int/float) passes straight through.
        return 0 + gid
    except TypeError:
        # String input: numeric text converts directly, otherwise
        # resolve it as a group name via the system database.
        return int(gid) if gid.isdigit() else grp.getgrnam(gid).gr_gid
5d3151754da1eaf83507288bb2ecdde25e2f27ad
682,557