content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def in_box(coords, box):
    """
    Find if a coordinate tuple is inside a bounding box.

    :param coords: Tuple containing latitude and longitude.
    :param box: Two tuples, where first is the bottom left, and the
        second is the top right of the box.
    :return: Boolean indicating if the coordinates are in the box.
    """
    # Bounds are exclusive on both axes, matching the original chained
    # comparisons; return the boolean expression directly.
    (lat_min, lon_min), (lat_max, lon_max) = box
    return lat_min < coords[0] < lat_max and lon_min < coords[1] < lon_max
ed4082b6311929e4982b4196ceaa566b05dfd714
682,804
def batch_delete(query, session):
    """
    Delete the result rows from the given query in batches.

    This minimizes the amount of time that the table(s) that the query
    selects from will be locked for at once.

    :param query: Query whose result rows should be deleted.
    :param session: Session used to delete rows and commit each batch.
    :return: Total number of rows deleted.
    """
    deleted = 0
    batch = query.limit(25)
    # Re-evaluating the limited query each pass picks up the next batch
    # until nothing is left.
    while batch.count() > 0:
        for row in batch:
            session.delete(row)
            deleted += 1
        session.commit()
    return deleted
eb10e259267882011581a46409a5464520304852
682,805
def to_triplets(colors):
    """
    Coerce a list into a list of triplets.

    If `colors` is a list of lists or strings, return it as is.
    Otherwise, divide it into tuplets of length three, silently
    discarding any extra elements beyond a multiple of three.

    :param colors: A list of lists/strings, or a flat sequence.
    :return: ``colors`` unchanged if already nested, otherwise a list of
        3-tuples built from consecutive elements.
    """
    try:
        colors[0][0]
        return colors
    except (TypeError, IndexError, KeyError):
        # Narrowed from a bare ``except``: not two-level subscriptable,
        # so treat it as a 1-dimensional list.
        pass

    extra = len(colors) % 3
    if extra:
        colors = colors[:-extra]
    # zip over three references to one iterator yields consecutive triples.
    return list(zip(*[iter(colors)] * 3))
809a8c57d2f590fb2984124efe86814850bb8921
682,807
import random
import string


def random_text(n):
    """Generate a random alphanumeric string of ``n`` characters.

    :param n: Number of characters to generate.
    :return: String of ``n`` random digits and ASCII letters.
    """
    pool = string.digits + string.ascii_letters
    # Generator into join (no intermediate list needed).
    return ''.join(random.choice(pool) for _ in range(n))
c8f6b14983a5f5712d6da8f6374dca8f5997ed07
682,808
import re


def to_bytes(value):
    """Convert numbers with a byte suffix to bytes.

    :param value: Either a string such as ``"10K"``, ``"5M"`` or
        ``"2G"``, or any other value, which is returned unchanged.
    :return: Integer number of bytes for suffixed strings, otherwise the
        input value unchanged.
    """
    # ``six`` dropped: the codebase already uses Python-3-only syntax
    # (f-strings), so ``str`` is the only string type.
    if isinstance(value, str):
        # Character class is [KMG], not [K,M,G]: the original also
        # matched a literal comma suffix, which then crashed with a
        # KeyError in the factor lookup.
        match = re.match(r'^(\d+)([KMG])$', value)
        if match:
            number, suffix = match.groups()
            factor = {
                'K': 1024,
                'M': 1024 ** 2,
                'G': 1024 ** 3,
            }[suffix]
            return int(round(factor * float(number)))
    return value
a2e686d56bd2bed9918ea4e8a165de36d54994e8
682,810
def is_perfect_slow(n):
    """Decide whether the integer ``n`` is a perfect number.

    A perfect number equals the sum of its proper divisors. This is the
    straightforward O(n) implementation.

    :param n: Integer to test.
    :return: True if ``n`` is perfect, False otherwise.
    """
    if n <= 0:
        return False
    # Sum of proper divisors; avoids shadowing the builtin ``sum`` as
    # the original did.
    divisor_sum = sum(i for i in range(1, n) if n % i == 0)
    return divisor_sum == n
efd570f4d4e7eb4d6fde87705f4f0e515d0abe24
682,812
def licence_name_to_file_name(licence_name: str) -> str:
    """
    Converts a licence name to the name of the file containing its
    definition.

    :param licence_name: The licence name.
    :return: The file name.
    """
    # Lower-case, spaces become dashes, then the .txt extension.
    normalised = "-".join(licence_name.lower().split(" "))
    return f"{normalised}.txt"
d74bd90f5b6c65cd662b88dc69f361507dc47bb2
682,817
def format_position(variant):
    """Gets a string representation of the variants position.

    Args:
      variant: third_party.nucleus.protos.Variant.

    Returns:
      A string chr:start + 1 (as start is zero-based).
    """
    one_based_start = variant.start + 1
    return f'{variant.reference_name}:{one_based_start}'
215534a2905622835a99f6945b73ac4fbe13e1f1
682,820
def score(goal, test_string):
    """Compare two input strings and return decimal value of quotient
    likeness.

    :param goal: Reference string; its positions define the comparison.
    :param test_string: Candidate string, indexed at every position of
        ``goal``.
    :return: Number of matching positions divided by ``len(goal)``.
    """
    matches = sum(
        1 for position, char in enumerate(goal)
        if test_string[position] == char
    )
    return matches / len(goal)
c50a2c3f2ff95d129e8816e4384d9b39ebd0e44b
682,827
import copy


def pad_1d_list(data, element, thickness=1):
    """
    Adds padding at the start and end of a list.

    This will make a shallow copy of the original.

    eg: pad_1d_list([1,2], 0) -> returns [0,1,2,0]

    Args:
        data: the list to pad
        element: gets added as padding (if its an object, it won't be
            instanced, just referenced in the lists)
        thickness: how many layers of padding

    Returns:
        the padded list
    """
    # Shallow copy so the caller's list is left untouched.
    padded = copy.copy(data)
    for _ in range(thickness):
        padded.insert(0, element)
        padded.append(element)
    return padded
2228307755053728f3ab11540d6673f5d6e8fa1f
682,831
def process_single(word):
    """
    Process a single word, whether it's identifier, number or symbols.

    :param word: str, the word to process
    :return: str, the input
    :raises ValueError: if the word starts with a digit but is not a
        valid integer literal.
    """
    starts_with_digit = word[0].isnumeric()
    if starts_with_digit:
        try:
            int(word)
        except ValueError:
            raise ValueError("Expression {} not valid".format(word))
    return word
0a08fd6ed7402fb4351adff8fb7f59106fbe4ca8
682,833
def remove_non_seriasable(d):
    """
    Converts AnnotationType and EntityType classes to strings. This is
    needed when saving to a file.

    :param d: Mapping whose ``annotation_type`` / ``entity_type`` values
        carry a ``.name`` attribute (enum-like).
    :return: New dict where those values are replaced by their
        lower-cased names; all other entries are kept as-is.
    """
    converted = {}
    for key, value in d.items():
        if key in ('annotation_type', 'entity_type'):
            converted[key] = str(value.name).lower()
        else:
            converted[key] = value
    return converted
30a9b50685ca5140ff99c7c98586fdee86d21212
682,836
def merge_dictionaries(dict1, dict2):
    """Merge dictionaries together, for the case of aggregating bindings.

    Values from ``dict2`` win on key collisions; neither input is
    modified.
    """
    return {**dict1, **dict2}
cd1445c458a42414a90b7d8c3810885ab351b46e
682,838
def transpose(matrix):
    """
    Compute the matrix transpose.

    The original implementation used the row count for both dimensions
    and therefore only worked for square matrices; this version also
    handles rectangular ones.

    :param matrix: the matrix to be transposed, as a list of rows; the
        transposing will not modify the input matrix
    :return: the transposed of matrix, as a new list of rows
    """
    return [list(column) for column in zip(*matrix)]
2dd0db2737fe1691d414c207c88fd8419c1860b6
682,843
def rivers_with_station(stations):
    """Given a list of stations, return an alphabetically sorted list of
    rivers with at least one monitoring station.

    :param stations: Iterable of station objects with a ``river``
        attribute.
    :return: Sorted list of unique river names.
    """
    # Set comprehension de-duplicates before sorting.
    return sorted({station.river for station in stations})
16161ababf034709e095406007db42e10b47c921
682,845
def permute(lst, perm):
    """Permute the given list by the permutation.

    Args:
      lst: The given list.
      perm: The permutation. The integer values are the source indices,
        and the index of the integer is the destination index.

    Returns:
      A permuted copy of lst, as a tuple.
    """
    return tuple(lst[source] for source in perm)
b12be0a40251d51e314e313996e1e11c60e4748c
682,849
import socket


def gethostbyaddr(ip):
    """
    Resolve a single host name with gethostbyaddr.

    Returns a string on success. If resolution fails, returns None.
    """
    try:
        # Element 0 of the result is the primary host name.
        return socket.gethostbyaddr(ip)[0]
    except OSError:
        return None
5e7c041171ea4fbaa75fa477f64121db5f05cd39
682,851
import torch
import math


def gain_change(dstrfs, batch_size=8):
    """
    Measure standard deviation of dSTRF gains.

    Arguments:
        dstrfs: tensor of dSTRFs with shape [time * channel * lag * frequency]
        batch_size: maximum number of channels processed at once; wider
            inputs are split along the channel axis and the per-slice
            results concatenated.

    Returns:
        gain_change: shape change parameter, tensor of shape [channel]
    """
    tdim, cdim, ldim, fdim = dstrfs.shape
    if cdim > batch_size:
        # Recurse on channel slices to bound peak memory use.
        num_batches = math.ceil(cdim / batch_size)
        slices = [
            gain_change(
                dstrfs[:, index * batch_size:(index + 1) * batch_size],
                batch_size=batch_size,
            )
            for index in range(num_batches)
        ]
        return torch.cat(slices)
    # Per-(time, channel) gain is the norm over the lag x frequency
    # plane; report its standard deviation over time, per channel.
    gains = dstrfs.norm(dim=[-2, -1])
    return gains.std(dim=0).cpu()
a64840f2d7d46869c226614da0977b21bc0140b9
682,854
def multiply(*fields, n):
    """
    Multiply ``n`` to the given fields in the document.

    The original implementation walked ``fields`` as a lookup chain and
    then multiplied a local variable, which left the document unchanged
    for immutable field values; this version scales each named field in
    place, matching the documented intent.

    :param fields: Names of the document fields to scale.
    :param n: Factor to multiply each field by (keyword-only).
    :return: A transform function that mutates a document in place.
    """
    def transform(doc):
        for field in fields:
            doc[field] *= n

    return transform
7697b0c21ecb6e77aedbaa60035c73f1fcee8239
682,855
def to_matrix_vector(transform):
    """Split an homogeneous transform into its matrix and vector components.

    The transformation must be represented in homogeneous coordinates.
    It is split into its linear transformation matrix and translation
    vector components.

    This function does not normalize the matrix. This means that for it
    to be the inverse of from_matrix_vector, transform[-1, -1] must
    equal 1, and transform[-1, :-1] must equal 0.

    Parameters
    ----------
    transform : numpy.ndarray
        Homogeneous transform matrix. Example: a (4, 4) transform
        representing linear transformation and translation in 3
        dimensions.

    Returns
    -------
    matrix, vector : numpy.ndarray
        The matrix and vector components of the transform matrix. For
        an (N, N) transform, matrix will be (N-1, N-1) and vector will
        be a 1D array of shape (N-1,).

    See Also
    --------
    from_matrix_vector
    """
    # Drop the final (homogeneous) row; the last column of what remains
    # is the translation part.
    matrix = transform[:-1, :-1]
    vector = transform[:-1, -1]
    return matrix, vector
ff64c6e93bfe8794e0a342bdc449bb42a05dc577
682,858
def prompt_int(prompt):
    """
    Prompt until the user provides an integer.

    :param prompt: Text shown to the user on each attempt.
    :return: The first entered value that parses as an int.
    """
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            # Unused ``as e`` binding removed; just re-prompt.
            print('Provide an integer')
9352b78cd01466654d65ed86482f7163000ec1c4
682,860
def check_overlap(stem1, stem2):
    """
    Checks if 2 stems use any of the same nucleotides.

    Args:
        stem1 (tuple): 4-tuple containing stem information.
        stem2 (tuple): 4-tuple containing stem information.

    Returns:
        bool: Boolean indicating if the two stems overlap.
    """
    # String dummy variables (used when implementing a discrete
    # variable) never overlap anything.
    if type(stem1) == str or type(stem2) == str:
        return False

    def _in_ranges(value, stem):
        # True when ``value`` falls in either closed interval of ``stem``.
        return stem[0] <= value <= stem[1] or stem[2] <= value <= stem[3]

    # Any endpoint of stem2 inside stem1's intervals...
    if any(_in_ranges(value, stem1) for value in stem2):
        return True
    # ...or stem1's two inner endpoints inside stem2's intervals
    # (the outer endpoints need not be re-checked).
    return any(_in_ranges(value, stem2) for value in stem1[1:3])
3d1381eb5e8cedf7cfe37a8263bbba670240d575
682,863
import ast


def str_to_list(string: str) -> list:
    """
    Convert a list-like str to a list.

    :param string: "[(0, 100), (105, 10) ...]"
    :return: list of tuples
    """
    # literal_eval safely parses Python literal syntax only.
    parsed = ast.literal_eval(string)
    return parsed
152e247a08d7ee50a85bfe5044ea6d3a9ce05169
682,866
import json


def json_load(path, verbose=False):
    """Load python dictionary stored in JSON file at ``path``.

    Args:
        path (str): Path to the file
        verbose (bool): Verbosity flag

    Returns:
        (dict): Loaded JSON contents
    """
    with open(path, 'r') as handle:
        if verbose:
            print('Loading data from {0}'.format(path))
        return json.load(handle)
ac099f33666c6502a253e91c6c03db434fa361cc
682,870
def center_crop(img_mat, size=(224, 224)):
    """
    Center Crops an image with certain size, image must be bigger than
    crop size (add check for that).

    params:
        img_mat: (3D-matrix) image matrix of shape (width, height, channels)
        size: (tuple) the size of crops (width, height)

    returns:
        img_mat: that has been center cropped to size of center crop
    """
    width, height, _ = img_mat.shape
    crop_w, crop_h = size
    # Anchor the crop window at the image centre.
    origin_w = width // 2 - crop_w // 2
    origin_h = height // 2 - crop_h // 2
    return img_mat[origin_w:origin_w + crop_w, origin_h:origin_h + crop_h, :]
5a65f44f3bc6e5fb7b768769580a3a0706e60673
682,871
def reorder_columns(frame, front_columns):
    """
    Re-order the columns of frame placing front_columns first.

    Good for looking specifically at certain columns for specific file
    in pipeline.

    :param frame: DataFrame whose columns are reordered.
    :param front_columns: Columns to move to the front, in order.
    :return: ``frame`` with its columns reordered.
    """
    remaining = list(frame.columns)
    for column in front_columns:
        remaining.remove(column)  # ValueError if a front column is absent
    return frame[front_columns + remaining]
7f0bfabe04ee1551c9ad1384a53478f5e0ad656a
682,874
def plot_line(m, line, colour='b', lw=1, alpha=1):
    """
    Plots a line given a line with lon,lat coordinates.

    Note:
        This means you probably have to call shapely `transform` on your
        line before passing it to this function. There is a helper
        partial function in utils called `utm2lola` which makes this
        easy.

    Args:
        m (Basemap): A matplotlib Basemap.
        line (shape): A shapely geometry.
        colour (str): A colour from the matplotlib dictionary.
        lw: Line width.
        alpha: Opacity.

    Returns:
        list: A list of matplotlib lines.
    """
    lons, lats = line.xy
    x, y = m(lons, lats)  # project lon/lat into map coordinates
    return m.plot(x, y, color=colour, linewidth=lw, alpha=alpha,
                  solid_capstyle='round')
4cf04142b7205116ff52c93ab32825a4874a28db
682,877
def _get_orientation(exif):
    """Get Orientation from EXIF.

    Args:
        exif (dict):

    Returns:
        int or None: Orientation
    """
    if not exif:
        return None
    # 0x0112 is the EXIF tag id for Orientation.
    return exif.get(0x0112)
d05b8e7f2029a303d40d049e8d3e1cb9da02fcef
682,879
import random


def generate_layers(layer_limit: int, size_limit: int, matching: bool):
    """
    Helper function for generating a random layer set.

    NOTE: Randomized!

    :param layer_limit: The maximum amount of layers to generate
    :param size_limit: The maximum size of each layer
    :param matching: Specifies whether the amount of layers, and
        activation functions is equal
    :return: Two lists, the first containing the sizes of each layer,
        the second one the activation functions
    """
    def _draw_count():
        return random.randint(1, layer_limit)

    # Draw order matters for reproducibility with a seeded RNG: layers
    # first, then activations, and the same pair order inside the loop.
    layer_count = _draw_count()
    activation_count = _draw_count()

    def _must_resample():
        # Matching: counts must be equal. Otherwise: strictly fewer
        # activations than layers.
        if matching:
            return activation_count != layer_count
        return activation_count >= layer_count

    while _must_resample():
        activation_count = _draw_count()
        layer_count = _draw_count()

    hidden_sizes = [random.randint(1, size_limit) for _ in range(layer_count)]
    activations = ["ReLU"] * activation_count
    return hidden_sizes, activations
27807566aee5d00f290821323e6aadce3733a8c7
682,885
def expand_resources(resources):
    """
    Construct the submit_job arguments from the resource dict.

    In general, a k,v from the dict turns into an argument '--k v'. If
    the value is a boolean, then the argument turns into a flag. If the
    value is a list/tuple, then multiple '--k v' are presented, one for
    each list item.

    :param resources: a dict of arguments for the farm submission
        command.
    :return: the argument string (with a trailing space when non-empty,
        as produced by the original implementation).
    """
    parts = []
    for field, val in resources.items():
        # isinstance instead of ``type(...) is``; bool is checked first
        # because bool is a subclass of int.
        if isinstance(val, bool):
            if val:
                parts.append('--{} '.format(field))
        elif isinstance(val, (list, tuple)):
            parts.extend('--{} {} '.format(field, item) for item in val)
        else:
            parts.append('--{} {} '.format(field, val))
    # join instead of repeated string concatenation.
    return ''.join(parts)
5e7edff0ceaf9b24a1d69cb9dcb3e9473e1d7b66
682,892
def gen_workspace_tfvars_files(environment, region):
    """Generate possible Terraform workspace tfvars filenames."""
    # Give preference to the explicit environment-region file, then
    # fall back to the environment name only.
    preferred = "%s-%s.tfvars" % (environment, region)
    fallback = "%s.tfvars" % environment
    return [preferred, fallback]
a1e822451b9652b846eaf21c6323943678e75d84
682,893
def comma_sep(values, limit=20, stringify=repr):
    """
    Print up to ``limit`` values, comma separated.

    Args:
        values (list): the values to print
        limit (optional, int): the maximum number of values to print
            (None for no limit)
        stringify (callable): a function to use to convert values to
            strings
    """
    total = len(values)
    shown = values
    suffix = ""
    if limit is not None and total > limit:
        shown = values[:limit]
        suffix = f", ... ({total - limit} more)"
    return ", ".join(stringify(item) for item in shown) + suffix
748f1956ed8383fc715d891204056e77db30eb4b
682,895
def myround(val, r=2):
    """
    Converts a string of float to rounded string.

    @param {String} val, "42.551"
    @param {int} r, the decimal to round
    @return {string} "42.55" if r is 2
    """
    number = float(val)
    return "{:.{}f}".format(number, r)
50e11ccae07764a3773e0a8adf45f010925f41b5
682,897
def normaliseports(ports):
    """Normalise port list.

    Parameters
    ----------
    ports : str
        Comma separated list of ports

    Returns
    -------
    str | List
        'all' when ports is None; the special keywords 'all', 'update'
        and 'test' unchanged; otherwise a sorted, de-duplicated list of
        integer ports.
    """
    if ports is None:
        return 'all'
    if ports in ('all', 'update', 'test'):
        return ports
    # A set removes possible duplicate values before sorting.
    unique_ports = {int(port) for port in ports.split(',')}
    return sorted(unique_ports)
125b73f0889cd99d5c0128a968b47ae4867cba7e
682,898
def review_pks(database):
    """
    Check that all tables have a PK.

    It's not necessarily wrong, but gives a warning that they don't
    exist.

    :param database: The database to review. Only the name is needed.
    :type database: Database
    :return: A list of recommendations.
    :rtype: list of str
    """
    print(f"reviewing primary keys for {database.database_name} database")
    return [
        f"No primary key on table {database.database_name}."
        f"{table.schema_name}.{table.table_name}"
        for table in database
        if not table.primary_key
    ]
830c0a988ac6ccc593b979d113a0a9d88a0cca0c
682,899
import re


def read_sta_file(sta_file):
    """
    Read information from the station file with free format:
    net,sta,lon,lat,ele,label.

    The label is designed with the purpose to distinguish stations into
    types.

    :param sta_file: Path of the whitespace-separated station file.
    :return: List of ``[net, sta, lon, lat, ele, label]`` rows with
        lon/lat as floats and ele as int.
    :raises Exception: If the file contains no rows.
    """
    cont = []
    with open(sta_file, 'r') as f:
        for line in f:
            line = line.rstrip()
            net, sta, _lon, _lat, _ele, label = re.split(" +", line)
            cont.append([net, sta, float(_lon), float(_lat), int(_ele), label])
    # The explicit ``f.close()`` of the original was redundant: the
    # ``with`` block already closes the file.
    if len(cont) == 0:
        raise Exception(f"No content in the station file {sta_file}")
    return cont
4dad0b3227dadf4cf098f7df552cfac285862910
682,901
def read_file(file_name):
    """Returns the content of file file_name."""
    with open(file_name) as handle:
        content = handle.read()
    return content
560b6ec2eeb507d694b9c10a82220dc8a4f6ca52
682,905
def t_add(t, v):
    """
    Add value v to each element of the tuple t.
    """
    shifted = [element + v for element in t]
    return tuple(shifted)
da2e58a7b2ff7c1cc9f38907aaec9e7c2d27e1d0
682,908
def fix_nonload_cmds(nl_cmds):
    """
    Convert non-load commands dict format from Chandra.cmd_states to the
    values/structure needed here. A typical value is shown below::

       {'cmd': u'SIMTRANS',               # Needs to be 'type'
        'date': u'2017:066:00:24:22.025',
        'id': 371228,                     # Store as params['nonload_id'] for provenence
        'msid': None,                     # Goes into params
        'params': {u'POS': -99616},
        'scs': None,                      # Set to 0
        'step': None,                     # Set to 0
        'time': 605233531.20899999,       # Ignored
        'timeline_id': None,              # Set to 0
        'tlmsid': None,                   # 'None' if None
        'vcdu': None},                    # Set to -1
    """
    converted = []
    for cmd in nl_cmds:
        # Keep the id for provenance; msid only when present.
        params = {'nonload_id': int(cmd['id'])}
        if cmd['msid'] is not None:
            params['msid'] = str(cmd['msid'])
        new_cmd = {
            'date': str(cmd['date']),
            'type': str(cmd['cmd']),
            # str() of None intentionally yields the string 'None'.
            'tlmsid': str(cmd['tlmsid']),
            'scs': 0,
            'step': 0,
            'timeline_id': 0,
            'vcdu': -1,
            'params': params,
        }
        if 'params' in cmd:
            for key, val in cmd['params'].items():
                # De-numpy scalar values (otherwise unpickling on PY3
                # has problems); plain values lack .item() and pass
                # through unchanged.
                try:
                    val = val.item()
                except AttributeError:
                    pass
                params[str(key)] = val
        converted.append(new_cmd)
    return converted
59bcce8af2c1779062d7a10d7186b170434bb244
682,917
import copy


def _update_inner_xml_ele(ele, list_):
    """Copies an XML element, populates sub-elements from `list_`

    Returns a copy of the element with the subelements given via list_

    :param ele: XML element to be copied, modified
    :type ele: :class:`xml.ElementTree.Element`
    :param list list_: List of subelements to append to `ele`
    :returns: XML element with new subelements from `list_`
    :rtype: :class:`xml.ElementTree.Element`
    """
    # NOTE(review): copy.copy makes a *shallow* copy; depending on the
    # ElementTree implementation the child list may be shared with the
    # original, in which case the item assignment below would also
    # mutate ``ele`` — confirm against the implementation in use.
    ret = copy.copy(ele)
    for i, v in enumerate(list_):
        # Replaces (not appends) the i-th existing subelement; a
        # ``list_`` longer than the element's children would raise
        # IndexError despite the docstring saying "append".
        ret[i] = v
    return ret
772218e66aebb99e8d4f0901979969eeda128b99
682,918
def construct_fixture_middleware(fixtures):
    """
    Constructs a middleware which returns a static response for any
    method which is found in the provided fixtures.

    :param fixtures: Mapping of RPC method name -> canned result.
    :return: A web3-style middleware factory.
    """
    def fixture_middleware(make_request, web3):
        def middleware(method, params):
            # Fall through to the real transport for unknown methods.
            if method not in fixtures:
                return make_request(method, params)
            return {'result': fixtures[method]}
        return middleware
    return fixture_middleware
46720c2a6e158dc931bb2c474dee9d6ffc6ef264
682,919
def can_leftarc(stack, graph):
    """
    Checks that the top of the stack has no head yet.

    :param stack: Parser stack; element 0 is the top.
    :param graph: Dependency graph with a 'heads' mapping.
    :return: True when a left-arc is allowed, otherwise False.
    """
    if not stack:
        return False
    return stack[0]['id'] not in graph['heads']
352fdebf1612f79a32569403cc4fd1caf51a456d
682,922
def concatenate_dictionaries(d1: dict, d2: dict, *d):
    """
    Concatenate two or multiple dictionaries.

    Can be used with multiple `find_package_data` return values. Later
    dictionaries win on key collisions.

    :param d1: First dictionary (no longer mutated; the original
        implementation updated it in place).
    :param d2: Second dictionary.
    :param d: Any further dictionaries.
    :return: A new dict with all entries merged.
    """
    # Copy first so the caller's d1 is left untouched.
    base = dict(d1)
    base.update(d2)
    for extra in d:
        base.update(extra)
    return base
d35391843b299545d6d7908e091ac9b9af274979
682,934
def nulls(x):
    """
    Convert values of -1 into None.

    Parameters
    ----------
    x : float or int
        Value to convert

    Returns
    -------
    val : [x, None]
    """
    return None if x == -1 else x
d87c6db9755121ec8f2ec283fb6686050d31b009
682,936
from typing import List


def compute_skew(sequence: str) -> List[int]:
    """Find the GC skew of the given sequence.

    Arguments:
        sequence {str} -- DNA string

    Returns:
        List[int] -- running skew after each base (+1 per G, -1 per C;
        other bases leave the skew unchanged). The original annotation
        said List[float], but the values are integers.
    """
    deltas = {"G": 1, "C": -1}
    running_skew = []
    skew = 0
    for base in sequence.upper():
        # The dead ``else: skew += 0`` branch is gone; a dict lookup
        # with default 0 covers non-G/C bases.
        skew += deltas.get(base, 0)
        running_skew.append(skew)
    return running_skew
678510360d2c8ab39c5f91351e90d08ddad3f9bb
682,937
import base64
import hashlib


def generate_hash(url):
    """
    Generates the hash value to be stored as key in the redis database.

    :param url: URL to hash (bytes, as required by hashlib.md5).
    :return: First 6 characters of the URL-safe base64 MD5 digest.
    """
    digest = hashlib.md5(url).digest()
    encoded = base64.urlsafe_b64encode(digest)
    return encoded[:6]
51add11d40c5a9538d5794de8d5724705436c0ea
682,938
def Fplan(A, Phi, Fstar, Rp, d, AU=False):
    """
    Planetary flux function

    Parameters
    ----------
    A : float or array-like
        Planetary geometric albedo
    Phi : float
        Planetary phase function
    Fstar : float or array-like
        Stellar flux [W/m**2/um]
    Rp : float
        Planetary radius [Earth radii]
    d : float
        Distance to star [pc]
    AU : bool, optional
        Flag that indicates d is in AU

    Returns
    -------
    Fplan : float or array-like
        Planetary flux [W/m**2/um]
    """
    earth_radius = 6.371e6  # radius of Earth (m)
    # Distance unit in metres: parsec by default, AU when requested.
    distance_unit = 1.495979e11 if AU else 3.08567e16
    return A * Phi * Fstar * (Rp * earth_radius / d / distance_unit) ** 2.
5f7b92e31ba22bd44f2e710acb4048fd9860409b
682,941
from collections import Counter


def create_inventory(items):
    """
    Create an inventory dictionary from a list of items.

    The string value of the item becomes the dictionary key. The number
    of times the string appears in items becomes the value, an integer
    representation of how many times.

    :param items: list - list of items to create an inventory from.
    :return: dict - the inventory dictionary.
    """
    # Counter replaces the hand-rolled tally loop; convert back to a
    # plain dict so the documented return type holds exactly.
    return dict(Counter(items))
9fb15b9c742d03f197c0e0fc402caa94d47f65b4
682,945
def get_paramfile(path, cases):
    """Load parameter based on a resource URI.

    It is possible to pass parameters to operations by referring to
    files or URI's. If such a reference is detected, this function
    attempts to retrieve the data from the file or URI and returns it.
    If there are any errors or if the ``path`` does not appear to refer
    to a file or URI, a ``None`` is returned.

    :type path: str
    :param path: The resource URI, e.g. file://foo.txt.  This value may
        also be a non resource URI, in which case ``None`` is returned.

    :type cases: dict
    :param cases: A dictionary of URI prefixes to function mappings that
        a parameter is checked against.

    :return: The loaded value associated with the resource URI.  If the
        provided ``path`` is not a resource URI, then a value of
        ``None`` is returned.
    """
    data = None
    # ``six.string_types`` replaced by ``str``: the codebase already
    # uses Python-3-only syntax (f-strings), so six is unnecessary.
    if isinstance(path, str):
        # Deliberately no break: a later matching prefix overwrites an
        # earlier result, exactly as the original did.
        for prefix, function_spec in cases.items():
            if path.startswith(prefix):
                function, kwargs = function_spec
                data = function(prefix, path, **kwargs)
    return data
e9a55bf9459f609f9a39678cd97541318a0ba48f
682,949
def adds_comment_sign(data: str, comment_sign: str) -> str:
    """Adds comment signs to the string.

    Each line is prefixed with ``comment_sign``; the final segment after
    the last newline is dropped (as in the original implementation).
    """
    lines = data.split("\n")[:-1]
    commented = (f"{comment_sign} {line}".strip() for line in lines)
    return "\n".join(commented)
a78d21fd1ae5325911e9b3e5bec3bf81f86757d1
682,952
def positive_coin_types_to_string(coin_dict):
    """
    Converts only the coin elements that are greater than 0 into a
    string.

    Arguments:
        coin_dict (dict): A dictionary consisting of all 4 coin types.

    Returns:
        (string): The resulting string.
    """
    pieces = []
    if coin_dict['plat'] > 0:
        pieces.append(f"{coin_dict['plat']}p ")
    if coin_dict['gold'] > 0:
        pieces.append(f"{coin_dict['gold']}g ")
    if coin_dict['silver'] > 0:
        pieces.append(f"{coin_dict['silver']}s ")
    if coin_dict['copper'] > 0:
        pieces.append(f"{coin_dict['copper']}c")
    # Trailing space (when copper is absent) is stripped, as before.
    return "".join(pieces).strip()
e4ad715e008fd836992b8023772a71cba16818bf
682,956
def finalize_model(input_model):
    """
    Extracts string from the model data. This function is always the
    last stage in the model post-processing pipeline.

    :param input_model: Model to be processed
    :return: list of strings, ready to be written to a module file
    """
    # Element 0 of each model line is the string payload.
    return [model_line[0] for model_line in input_model]
88c03ed9b4b6158895ac6e88ef27b573de2b0027
682,961
from typing import OrderedDict


def load_object(worksheet):
    """
    Converts worksheet to dictionary.

    Args:
        worksheet: spreadsheet worksheet (xlrd-style interface with
            ``ncols``, ``col_values`` and ``cell_value``).

    Returns:
        OrderedDict: header cell -> list of that column's remaining
        values.
    """
    columns = OrderedDict()
    for column_index in range(worksheet.ncols):
        values = worksheet.col_values(column_index)
        header = worksheet.cell_value(0, column_index)
        # Row 0 is the header; keep everything below it.
        columns[header] = values[1:]
    return columns
d9c70f6805fbe906042d9a94293b4b989bee02b1
682,962
def getConstructors(jclass):
    """Returns an array containing Constructor objects reflecting all the
    public constructors of the class represented by this Class object.

    :param jclass: Java class wrapper; ``jclass.class_`` is presumably
        the underlying ``java.lang.Class`` proxy — TODO confirm against
        the Java bridge library in use.
    :return: A sliced copy of the sequence returned by
        ``getConstructors()``.
    """
    return jclass.class_.getConstructors()[:]
c7dc893002ab913b55ce6e6e121af322fd6ab7b6
682,975
def ask_move(player: int) -> int:
    """Ask the player which pawn to move.

    Returns an integer between 0 and 3."""
    while True:
        response = input(f"Player {player}: Choose a piece to move (0-3): ")
        try:
            pawn_number = int(response)
        except ValueError:
            # Not an integer: ask again.
            continue
        if 0 <= pawn_number <= 3:
            return pawn_number
b5b23052fe24078f44ff4bddb52e4bbd693807d0
682,976
def find(arr, icd_code=False):
    """Search in the first column of `arr` for a 'Yes' and return the
    respective entry in the second column.

    :param arr: 2-column array (e.g. numpy array of strings).
    :param icd_code: When True, strip spaces and non-breaking spaces
        from the found entry; otherwise lower-case it.
    :return: The matching second-column entry, or "unknown" when no
        first-column entry contains "Yes".
    """
    search = [str(item) for item in arr[:, 0]]
    candidates = [str(item) for item in arr[:, 1]]
    try:
        # First row whose first column contains "Yes"; narrowed from a
        # bare ``except`` — only "no match" is an expected failure here.
        idx = next(i for i, item in enumerate(search) if "Yes" in item)
        found = candidates[idx]
    except StopIteration:
        found = "unknown"

    if icd_code:
        found = found.replace("\xa0", "")
        found = found.replace(" ", "")
    else:
        found = found.lower()
    return found
87fe9d1e98a4feece19222e8a84a3e6ffb90540d
682,981
from typing import IO


def open_file_or_stream(fos, attr, **kwargs) -> IO:
    """Open a file or use the existing stream.

    Avoids adding this logic to every function that wants to provide
    multiple ways of specifying a file.

    Args:
        fos: File or stream
        attr: Attribute to check on the ``fos`` object to see if it is
            a stream, e.g. "write" or "read"
        kwargs: Additional keywords passed to the ``open`` call. Ignored
            if the input is a stream.

    Returns:
        Opened stream object
    """
    # Anything already exposing the requested attribute is treated as a
    # ready-to-use stream.
    if hasattr(fos, attr):
        return fos
    return open(fos, **kwargs)
9f7955a0ced009039095f35c7f7deb1eb25f97b9
682,983
def find_max_sub(l):
    """
    Find subset with higest sum

    Example: [-2, 3, -4, 5, 1, -5] -> (3,4), 6

    @param l list
    @returns subset bounds and highest sum
    """
    best_sum = l[0]        # best sum seen so far (avoids shadowing ``max``)
    best_bounds = (0, 0)   # bounds of the best subset
    running = 0            # sum of the current candidate subset
    start = 0              # start index of the current candidate
    for end, value in enumerate(l):
        running += value
        if running > best_sum:
            best_sum = running
            best_bounds = (start, end)
        elif running < 0:
            # A negative prefix can never help; restart just after it.
            running = 0
            start = end + 1
    return best_bounds, best_sum
ebf42b58f9fea4276d0ba7d15b02faed4558efc6
682,984
def f_W(m_act, n, Wint):
    """Calculate shaft power.

    :param m_act: factor multiplied with ``n``.
    :param n: factor multiplied with ``m_act``.
    :param Wint: amount subtracted from the product.
    :return: ``m_act * n - Wint``.
    """
    gross = m_act * n
    return gross - Wint
4e1182c1fd55a0fb688a7146038cf7c2d0916a3c
682,988
def _formatDict(d):
    """
    Returns dict as string with HTML new-line tags <br> between
    key-value pairs.
    """
    # join yields the same result as appending "<br>" per pair and
    # stripping the trailing tag (including the empty-dict case).
    return "<br>".join(f"{key}: {d[key]}" for key in d)
be9749e5f69c604f3da95902b595f9086b01baa5
682,989
from typing import List


def clusters_list(num_clusters: int) -> List[list]:
    """Create a list of empty lists for number of desired clusters.

    Args:
        num_clusters: number of clusters to find; points are later
            stored in these empty indexed lists.

    Returns:
        clusters: list of ``num_clusters`` distinct empty lists.
    """
    # A comprehension (never ``[[]] * n``) so every inner list is a
    # distinct object.
    return [[] for _ in range(num_clusters)]
d6130f67e72d1fd31def20f2bac1fd81c6878ba5
682,990
def number_format(num, places=0):
    """Format a number with grouped thousands and given decimal places.

    :param num: Number to format.
    :param places: Decimal places to keep (negative values are treated
        as 0, as before).
    :return: String such as ``"1,234,567.89"``.
    """
    # The format-spec "," option implements the thousands grouping the
    # original built by hand; %-style and format() share the same float
    # rounding, so results are identical.
    return "{:,.{}f}".format(num, max(0, places))
6d6f2412fa94857f77043a30ec2a14f809c5f039
682,991
def get_cdm_cluster_location(self, cluster_id):
    """Retrieves the location address for a CDM Cluster

    Args:
        cluster_id (str): The ID of a CDM cluster

    Returns:
        str: The Cluster location address
        str: Cluster location has not be configured.

    Raises:
        Exception: If no cluster with an ID of ``cluster_id`` was found.
        RequestException: If the query to Polaris returned an error
    """
    query_name = "cdm_cluster_location"
    variables = {"filter": {"id": [cluster_id]}}
    # The original wrapped everything in try/except Exception: raise,
    # which is a no-op and has been removed.
    query = self._query(query_name, variables)
    if not query['nodes']:
        raise Exception(
            "A CDM Cluster with an ID of {} was not found.".format(cluster_id))
    geo_location = query['nodes'][0]['geoLocation']
    # ``is not None`` instead of ``!= None``.
    if geo_location is not None:
        return geo_location['address']
    return "No Location Configured"
f8d3aa787b625e5461fd22eb11f2495a33bace0b
682,995
def is_session_dir(path):
    """Return whether a path is a session directory.

    Example of a session dir:
    `/path/to/root/mainenlab/Subjects/ZM_1150/2019-05-07/001/`
    """
    # A session dir sits three levels below a 'Subjects' directory:
    # Subjects/<subject>/<date>/<number>.
    three_up = path.parent.parent.parent
    return path.is_dir() and three_up.name == 'Subjects'
474096a74068222ca8672fed838dc2703668648e
682,998
def coalesce(*xs):
    """
    Coalescing monoid operation: return the first non-null argument or
    None.

    Examples:
        >>> coalesce(None, None, "not null")
        'not null'
    """
    # With exactly one positional argument, that argument itself is
    # taken as the iterable of candidates (so ``coalesce([a, b])``
    # works) — preserved from the original implementation.
    candidates = xs[0] if len(xs) == 1 else xs
    return next((x for x in candidates if x is not None), None)
2210de29cb2fbc9571bd4bed9fc8a89feddbb8c8
683,001
import re


def __is_rut_perfectly_formatted(rut: str) -> bool:
    """
    Validates if Chilean RUT Number is perfectly formatted.

    Args:
        rut (str): A Chilean RUT Number. For example 5.126.663-3

    Returns:
        bool: True when Chilean RUT number (rut:str) is perfectly
        formatted.

    ** Only validates the format not if the RUT is valid or not
    """
    # 1-3 digits, two dot-separated 3-digit-max groups, a dash, then a
    # digit or the check letter k/K.
    pattern = r"^(\d{1,3}(?:\.\d{1,3}){2}-[\dkK])$"
    return re.match(pattern, rut) is not None
42fbeb3968a1c2536e44154ab9e690817a57ccd4
683,007
def create_single_object_response(status, object, object_naming_singular):
    """
    Create a response for one returned object.

    @param status: success, error or fail
    @param object: dictionary object
    @param object_naming_singular: name of the object, f.ex. book
    @return: dictionary.
    """
    payload = {object_naming_singular: object}
    return {"status": status, "data": payload}
9626fc5594e5677196454594e719ebf415fc1ebc
683,016
def _query_item(item, query_id, query_namespace):
    """
    Check if the given cobra collection item matches the query arguments.

    Parameters
    ----------
    item: cobra.Reaction or cobra.Metabolite
    query_id: str
        The identifier to compare. The comparison is made case
        insensitively.
    query_namespace: str
        The miriam namespace identifier in which the given metabolite is
        registered. See https://www.ebi.ac.uk/miriam/main/collections
        The comparison is made case insensitively.

    Returns
    -------
    bool
        True if the given id exists in the default namespace, or in the
        model annotations by the queried namespace, otherwise False.
    """
    wanted = query_id.lower()
    # Try the default identifiers (without confirming the namespace).
    if wanted == item.id.lower():
        return True
    # Otherwise look for a case-insensitive namespace match.
    for namespace in item.annotation:
        if query_namespace.lower() != namespace.lower():
            continue
        annotation = item.annotation[namespace]
        # Annotations may contain a single id or a list of ids; compare
        # case insensitively either way.
        if isinstance(annotation, list):
            if wanted in (entry.lower() for entry in annotation):
                return True
        elif wanted == annotation.lower():
            return True
    return False
f3e418ab5cf2830d2c1dd6b4e83275e14dc8f4c8
683,020
import re


def strip_html_tags(text):
    """Strip HTML tags in a string.

    :param text: String containing HTML code
    :type text: str
    :return: String without HTML tags
    :rtype: str

    :Example:

    >>> strip_html_tags('<div><p>This is a paragraph</div>')
    'This is a paragraph'

    >>> strip_html_tags('<em class="highlight">Highlighted</em> text')
    'Highlighted text'
    """
    tag_pattern = re.compile('<[^<]+?>')
    return tag_pattern.sub('', text)
e7b060bdea980cfee217d81feccf56cf964f5557
683,022
def get_next_open_row(board, col):
    """
    Finds the topmost vacant cell in column `col`, in `board`'s grid.
    Returns that cell's corresponding row index, or None when the
    column is full.
    """
    # Scan bottom-up: [0][0] is the top-left of the grid, so the
    # largest row index is the bottom row.
    for row in reversed(range(board.n_rows)):
        if board.grid[row][col] == 0:
            return row
    return None
7f6e45a0c136e53482a10264fc88020168056d8a
683,024
import csv


def proc_attendees(att_file, config):
    """Collect the attendees listed in a CSV file into a dictionary.

    Each row is stored keyed by its email address forced to lower case,
    which makes attendance look-ups fast and de-duplicates addresses
    (later rows overwrite earlier ones for the same address).

    Args:
        att_file - open file object for the attendee CSV list; assumed valid,
                   so no checking is performed
        config - ConfigParser object containing the configuration data
                 (the ['ATTENDEES']['EMAIL_FIELD'] entry names the CSV
                 column holding the email address)

    Returns:
        dict mapping lower-cased email address -> full CSV row (dict).
    """
    email_field = config['ATTENDEES']['EMAIL_FIELD']
    attendees = {}
    with att_file:
        for record in csv.DictReader(att_file):
            attendees[record[email_field].lower()] = record
    return attendees
e9a4a59ec557c999b2ff423df8a02ca04a4545a8
683,026
import re


def _parse_arn(arn):
    """Parse an AWS ARN string into its resource id and region.

    ARN forms handled (e.g. ec2: arn:aws:ec2:<REGION>:<ACCOUNT_ID>:instance/<instance-id>):
        arn:partition:service:region:account-id:resource-id
        arn:partition:service:region:account-id:resource-type/resource-id
        arn:partition:service:region:account-id:resource-type:resource-id

    Returns:
        ([resource_id], [region]) on success; resource_id may be None when
        the resource part cannot be parsed. (None, None) when *arn* does not
        look like an ARN at all.
    """
    pattern = (r"(?P<arn>arn):"
               r"(?P<partition>aws|aws-cn|aws-us-gov):"
               r"(?P<service>[A-Za-z0-9_\-]*):"
               r"(?P<region>[A-Za-z0-9_\-]*):"
               r"(?P<account>[A-Za-z0-9_\-]*):"
               r"(?P<resources>[A-Za-z0-9_\-:/]*)")
    match = re.compile(pattern).match(arn)
    if match is None:
        return (None, None)

    parts = match.groupdict()
    region = parts.get('region', None)
    resource_id = None
    resources = parts.get('resources', None)
    if resources:
        items = re.split('/|:', resources)
        if len(items) == 1:
            # Bare resource id with no type prefix.
            resource_id = items[0]
        elif len(items) == 2:
            # items[0] is the resource type; only the id is returned.
            resource_id = items[1]
        else:
            print(f'ERROR parsing: {resources}')
    return [resource_id], [region]
c267a63cb82ce3c9e2dd0dadaea3ac5a53630d53
683,027
from pathlib import Path


def is_subpath(parent_path: str, child_path: str):
    """Return True if `child_path` is a sub-path of `parent_path`.

    :param parent_path: candidate ancestor path
    :param child_path: candidate descendant path
    :return: True when ``parent_path`` is one of ``child_path``'s parents
    """
    child = Path(child_path)
    return Path(parent_path) in child.parents
6852efb5eede8e16871533dca9d0bf17dd7454bb
683,030
def getIPaddr(prefix):
    """ Get the IP address of the client, by grocking the .html report.

    Scans <prefix>.html for a line beginning with "Target:"; the third
    space-separated token on that line is the address, wrapped in parens.
    Returns "Unknown" when no such line is found.
    """
    addr = "Unknown"
    with open(prefix + ".html", "r") as report:
        for line in report:
            words = line.split(" ")
            if words[0] == "Target:":
                addr = words[2].lstrip("(").rstrip(")")
                break
    return addr


# Dead code kept for reference by the original author:
"""
Busted (but otherwise more correct) web100 method
Open a web100 logfile to extract the "RemoteAddr"
    print "Opening", log
    vlog = libweb100.web100_log_open_read(log)
    agent = libweb100.web100_get_log_agent(vlog)
    group = libweb100.web100_get_log_group(vlog)
    conn = libweb100.web100_get_log_connection(vlog)
    var = libweb100.web100_var_find(group, "RemoteAddr")
    snap = libweb100.web100_snapshot_alloc(group, conn)
    libweb100.web100_snap_from_log(snap, vlog)
    buf=cast(create_string_buffer(20), cvar.anything) # XXX
    libweb100.web100_snap_read(var, snap, buf)
    val=libweb100.web100_value_to_text(WEB100_TYPE_IP_ADDRESS, buf)
    libweb100.web100_log_close_read(vlog)
    print val
"""
de1e80e978eeef8ca74b05a7a53e0905e79a29e0
683,031
import re


def clean_text(text, max_words=None, stopwords=None, skip_words=20):
    """
    Remove stopwords, punctuation, and numbers from text.

    Args:
        text: article text
        max_words: number of words to keep after processing;
            if None, include all words
        stopwords: a list of words to skip during processing;
            if None, ignored
        skip_words: number of leading tokens to drop before returning —
            the first words of an article are often introductory.
            Default 20 preserves the historical behavior.

    Returns:
        List of cleaned, lower-cased tokens (space-split), with the first
        ``skip_words`` tokens dropped.
    """
    # Keep only letters, digits and whitespace.
    text = re.sub(r"[^a-zA-Z0-9\s]", "", text)

    good_tokens = []
    for raw in re.split(r"\s+", text):
        token = raw.lower().strip()

        # remove stopwords
        if stopwords is not None and token in stopwords:
            continue

        # drop tokens without alphabetic characters (punctuation, numbers)
        if any(char.isalpha() for char in token):
            good_tokens.append(token)

    if max_words is None:
        return good_tokens[skip_words:]
    return good_tokens[skip_words:skip_words + max_words]
c5fe9bb01928355d81b566ad4ebee1232ebae810
683,033
def array_value(postiion, arr):
    """Return the value stored in ``arr`` at the (row, col) tuple.

    NOTE(review): parameter name keeps the original's spelling to stay
    compatible with keyword callers. ``arr`` is indexed with a tuple, so a
    numpy-style 2-D array is expected.
    """
    row = postiion[0]
    col = postiion[1]
    return arr[row, col]
d8a83ef4870d304fefe33220e38b78c8c8afee56
683,039
def get_padding(ks, s, hw):
    """
    Choose a per-side padding value so that a convolution with kernel size
    ``ks`` and stride ``s`` over a spatial extent ``hw`` satisfies:
        s = 1 -> out_hw = in_hw
        s = 2 -> out_hw = in_hw // 2
        s = 4 -> out_hw = in_hw // 4
    """
    remainder = hw % s
    if remainder == 0:
        total_pad = max(ks - s, 0)
    else:
        total_pad = max(ks - remainder, 0)
    # Split across both sides, rounding the odd pixel up.
    return (total_pad + 1) // 2
1fffec0037275bb71566b1967015f1ed7fb6a9bb
683,040
def generate_header_list(unsorted_keys):
    """Return a list of headers for the CSV file, ensuring that the order of
    the first four headers is fixed and the remainder are sorted.

    Args:
        unsorted_keys: list of column keys; must contain 'identifier'.
            The list is NOT modified (the original implementation mutated it
            in place, surprising callers).

    Raises:
        ValueError: if 'identifier' is not present (propagated from
            list.remove, matching the original behavior).
    """
    # Work on a copy so the caller's list is left untouched.
    remainder = list(unsorted_keys)
    remainder.remove('identifier')
    return ['identifier', 'label1', 'label2', 'label3'] + sorted(remainder)
18cb8d4be40e16da1b3c05070494dc791a7e4e02
683,041
def hamming_distance(str1, str2):
    """Calculate the Hamming distance between two bit strings

    Args:
        str1 (str): First string.
        str2 (str): Second string.

    Returns:
        int: Number of positions at which the strings differ.

    Raises:
        ValueError: Strings not same length
    """
    if len(str1) != len(str2):
        raise ValueError('Strings not same length.')
    distance = 0
    for a, b in zip(str1, str2):
        if a != b:
            distance += 1
    return distance
8ca962c82b1321c32c34052ccf8f9a74d0f9245d
683,042
import requests


def get_all_topk_articles(day_range):
    """
    Accepts a list of dicts with year, month, and day values (plus an
    'api_date' key used to label the counts).

    Returns a dictionary keyed by article title with, for each article that
    appeared in the daily top list during those dates, a mapping of
    api_date -> pageview count.

    Example query:
    https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/2020/03/31
    """
    q_template = "https://wikimedia.org/api/rest_v1/metrics/pageviews/top/en.wikipedia.org/all-access/{year}/{month}/{day}"
    collected = {}

    for day_val in day_range:
        response = requests.get(
            url=q_template.format(**day_val),
            headers={'User-Agent': "hostbot (https://wikitech.wikimedia.org/wiki/Tool:HostBot, jonnymorgan.esq@gmail.com)"},
        )
        payload = response.json()

        for entry in payload['items'][0]['articles']:
            title = entry['article']
            day_views = {day_val['api_date']: entry['views']}
            if title in collected:
                collected[title].update(day_views)
            else:
                collected[title] = day_views

    return collected
4dfad93b94ef4efed2d9018b62bee721edf54b89
683,043
def sync(printer, ast):
    """Prints a synchronization "chan(!|?)"."""
    rendered_channel = printer.ast_to_string(ast["channel"])
    return '{}{}'.format(rendered_channel, ast["op"])
ae3c9045a48a169e37602df13eb88da8ac9a65b6
683,045
from typing import Union
from typing import List
import pickle


def parse_list(data: Union[str, List], indicator: int = 0, query=['MCF7']) -> List:
    """Filter gene-expression records by cell line, compound, dose or time.

    Parameters
    ----------
    data: Union[str, List]
        Either the path of a pickled dataset (e.g.
        './Data/level3_trt_cp_landmark.pkl') or an already-loaded list of
        tuples with the format:
            line[0]: (cell_line, drug, drug_type, dose, dose_type, time, time_type)
            line[1]: 978 or 12328-dimensional vector (gene expression profile)
    indicator: int
        Which metadata field to filter on:
        0: cell_lines  1: compounds  2: doses  3: time
        Default=0 (cell_lines).
    query: List
        Values to keep for the selected field. Default=['MCF7'].

    Returns
    -------
    List
        The records whose selected metadata field is in ``query``.
    """
    assert isinstance(indicator, int), "The indicator must be an int object"
    assert indicator in [0, 1, 2, 3], "You should choose indicator from 0, 1, 2 range"
    assert isinstance(query, list), "The parameter query must be a list"
    print("=================================================================")
    print("Data Loading..")
    assert isinstance(data, (str, list)), "The data should be string or list object"

    if isinstance(data, str):
        # Path given: unpickle from disk.
        with open(data, "rb") as f:
            records = pickle.load(f)
    else:
        assert isinstance(data, list), "The data must be a list object"
        records = data

    # Position of each filter field inside the metadata tuple.
    field_index = {0: 0, 1: 1, 2: 3, 3: 5}[indicator]
    field_name = {0: 'cell_lines', 1: 'compounds', 2: 'doses', 3: 'time'}[indicator]

    print("Number of Train Data: {}".format(len(records)))
    print("You are parsing the data base on {}".format(field_name))

    selected = [record for record in records if record[0][field_index] in query]
    print("Number of Data after parsing: {}".format(len(selected)))
    return selected
67a0fa75c659a5bcbff2aa44e131a56096bf8865
683,047
def extend_flight_distance(distance: float) -> float:
    """Add factor to shortest flight distance to account for indirect flight paths

    Following https://www.icao.int/environmental-protection/CarbonOffset/Documents/Methodology%20ICAO%20Carbon%20Calculator_v11-2018.pdf
    section 4.2 a correction factor is added to represent deviations from the
    shortest path flown between points due to stacking, traffic and
    weather-driven corrections.

    Args:
        distance: Shortest distance - geodesic or Great Circle - between points in km
    Returns:
        Distance with additional correction factor
    """
    # (upper bound exclusive, correction in km) per ICAO band.
    for ceiling, correction in ((550, 50), (5500, 100)):
        if distance < ceiling:
            return distance + correction
    return distance + 125
a4dedcbfcba663be757ba97cd28b6afac1c77a01
683,055
def triwhite(x, y):
    """
    Convert x,y chromaticity coordinates to XYZ tristimulus values,
    normalized so that Y == 1.0.
    """
    return [x / y, 1.0, (1 - x - y) / y]
dd2dd418c4643e10d3b9fd685bd6df1a3cc5afd7
683,056
def create_header(args):
    """Constructs the header row for the csv.

    The base columns are always present; financial columns are appended in a
    fixed order according to the corresponding flags on ``args``. When
    ``args.all`` is set, every financial column is included regardless of the
    individual flags.

    Fixes a bug in the original where adjacent string literals
    ("Net" "Assets") produced the header "NetAssets" instead of "Net Assets".
    """
    header = ["Propublica Number", "Org Name", "Tax Year", "Data Source",
              "PDF URL"]
    financial_columns = [
        (args.totalrev, "Total Revenue"),
        (args.totalexp, "Total Functional Expenses"),
        (args.netinc, "Net Income"),
        (args.totalass, "Total Assets"),
        (args.totallia, "Total Liabilities"),
        (args.netass, "Net Assets"),
    ]
    if args.all:
        return header + [name for _, name in financial_columns]
    return header + [name for wanted, name in financial_columns if wanted]
b47dd3254262624e63b32cd269bef8890883707d
683,059
def predict_large_image(model, input_image):
    """Predict on an image larger than the one it was trained on

    All networks with U-net like architecture in this repo use downsampling
    of 2, which is only conducive for images with shapes in powers of 2. If
    different, please crop / resize accordingly to avoid shape mismatches.

    :param keras.Model model: Model instance
    :param np.array input_image: as named. expected shape:
     [num_channels, (depth,) height, width] or
     [(depth,) height, width, num_channels]
    :return np.array predicted image: as named. Batch axis removed (and
     channel axis if num_channels=1)
    """
    num_dims = len(input_image.shape)
    # Only batched 2D / 3D inputs with a channel dimension are supported.
    assert num_dims in (4, 5), \
        'Invalid image shape: only 4D and 5D inputs - 2D / 3D ' \
        'images with channel and batch dim allowed'
    return model.predict(input_image)
f762c997c953487df32e111babfb25059a2d344d
683,060
import logging


def get_logger(*components) -> logging.Logger:
    """Get a logger under the app's hierarchy.

    The logger name is 'skipscale' followed by each component, dot-joined.
    """
    qualified_name = '.'.join(['skipscale', *components])
    return logging.getLogger(qualified_name)
9060f07091ac19ae90a61b69b2eca3ef1daf1d05
683,065
def convert_to_SimpleIndex(data, axis=0):
    """
    Converts the index of a DataFrame to a simple, one-level index

    The target index uses standard SimCenter convention to identify different
    levels: a dash character ('-') is used to separate each level of the
    index.

    Parameters
    ----------
    data: DataFrame
        The DataFrame that will be modified.
    axis: int
        Identifies if the index (0) or the columns (1) shall be edited.

    Returns
    -------
    data: DataFrame
        The modified DataFrame

    Raises
    ------
    ValueError
        If ``axis`` is neither 0 nor 1.
    """
    if axis not in (0, 1):
        raise ValueError(f"Invalid axis parameter: {axis}")

    labels = data.index if axis == 0 else data.columns
    flattened = ['-'.join(str(part) for part in label) for label in labels]

    if axis == 0:
        data.index = flattened
    else:
        data.columns = flattened
    return data
2f6ce3af39fd01314feeb0933821dbe67dbe8c1c
683,073
from typing import Iterable


def hr_bytes(data: Iterable[int], delimiter: str = ' ') -> str:  # pragma: no cover
    """
    Render bytes (or another int iterable) as delimiter-separated hex pairs.

    :param data: Bytes or iterable of ints
    :param delimiter: Delimiter (str value)
    :return: str value (e.g. '02 02 04 05 06 00')
    """
    return delimiter.join(format(octet, "02x") for octet in data)
a399b1da61fc5cef6ff071ea2ae7566036ae1726
683,075
def create_path(network, user_A, user_B, path=None):
    """
    Finds a connections path from user_A to user_B using recursion.
    It has to be an existing path but it DOES NOT have to be the shortest
    path. Cycles are handled by never revisiting a node already on the path.

    Arguments:
        network: The network created with create_data_structure.
        user_A: String holding the starting username.
        user_B: String holding the ending username.
        path: The current path (for recursion); callers normally omit it.

    Returns:
        A list showing the path from user_A to user_B, or None if no such
        path exists.

    Sample output:
        print find_path_to_friend(network, "Abe", "Zed")
        ['Abe', 'Gel', 'Sam', 'Zed']
        This implies that Abe is connected with Gel, who is connected with
        Sam, who is connected with Zed.

    Fixes two defects of the original:
    - mutable default argument ``path=[]``;
    - a failed recursive branch overwrote ``path`` with None, making the
      next ``node not in path`` test raise TypeError.
    """
    if path is None:
        path = []
    path = path + [user_A]  # all paths include the starting node
    if user_A == user_B:    # base case: reached the target
        return path
    for node in network[user_A][0]:
        if node not in path:  # never revisit, so cycles terminate
            # Keep the recursion result separate so a dead-end branch
            # does not clobber the accumulated path.
            found = create_path(network, node, user_B, path)
            if found:
                return found
    return None
b3e88f4e18c0ab86c971706ea74e0a1739a61a2e
683,077
import math


def chunk(iterable, n):
    """Splits a list into n equal parts.

    The last part may be shorter, and trailing parts may be empty when the
    input has fewer than ``n * ceil(len/n)`` elements.
    """
    items = list(iterable)
    part_len = math.ceil(len(items) / n)
    return [items[i * part_len:(i + 1) * part_len] for i in range(n)]
74d649b8db2625861db6110733a0ea8342541657
683,079
def first_down(items):
    """Return True if the first item is down (marked '-')."""
    first = items[0]
    return first == '-'
e24afe79971572de01676bda608a317c83fb7792
683,080
def unsort(sorted_list, oidx):
    """
    Unsort a sorted list, based on the original indices.

    ``oidx[i]`` is the position element ``sorted_list[i]`` occupied before
    sorting; the result places every element back at its original position.

    Fix: the original crashed with ValueError on empty input because
    ``zip(*[])`` yields nothing to unpack; an empty list is now returned.
    """
    assert len(sorted_list) == len(oidx), "Number of list elements must match with original indices."
    if not sorted_list:
        return []
    return [element for _, element in sorted(zip(oidx, sorted_list))]
fc54770d389029d36e598035b82264b325d76940
683,083
def figure_linguistic_type(labels):
    """
    Gets linguistic type for labels

    Parameters
    ----------
    labels : list of lists
        the labels of a tier; each entry is (linguistic_type, sort_key, ...)

    Returns
    -------
    The linguistic type of the entry with the smallest sort key, or None
    for an empty tier.
    """
    if not labels:
        return None
    if len(labels) == 1:
        return labels[0][0]
    earliest = min(labels, key=lambda entry: entry[1])
    return earliest[0]
14151917bb9ad8f49717ce6c436c496ee3ccfc77
683,084
def get_rank(cutoff: dict, coverage: float, quality: float, length: int, contigs: int,
             genome_size: int, is_paired: bool) -> list:
    """ Determine the rank (gold, silver, bronze, exclude) based on user cutoffs.

    Args:
        cutoff (dict): Cutoffs set by users to determine rank; holds 'gold',
            'silver' and 'bronze' threshold dicts plus optional
            'min-assembled-size' / 'max-assembled-size' values.
        coverage (float): Estimated coverage of the sample
        quality (float): Per-read average quality
        length (int): Median length of reads
        contigs (int): Total number of contigs
        genome_size (int): Genome size of sample used in analysis
        is_paired (bool): Sample used paired-end reads

    Returns:
        list: [rank, reason] where reason is a ';'-joined, sorted summary.

    Fixes two defects of the original:
    - the max-assembled-size check used '<' where "too large" requires '>';
    - the min-assembled-size message said "expect <=" instead of ">=".
    """
    rank = None
    reason = []
    # Normalize the inputs to consistent precision before comparing.
    coverage = float(f'{float(coverage):.2f}')
    quality = float(f'{float(quality):.2f}')
    length = round(float(f'{float(length):.2f}'))
    contigs = int(contigs)
    genome_size = int(genome_size)
    gold = cutoff['gold']
    silver = cutoff['silver']
    bronze = cutoff['bronze']

    if coverage >= gold['coverage'] and quality >= gold['quality'] and length >= gold['length'] and contigs <= gold['contigs'] and is_paired:
        reason.append('passed all cutoffs')
        rank = 'gold'
    elif coverage >= silver['coverage'] and quality >= silver['quality'] and length >= silver['length'] and contigs <= silver['contigs'] and is_paired:
        # Report which gold cutoffs were missed.
        if coverage < gold['coverage']:
            reason.append(f"Low coverage ({coverage:.2f}x, expect >= {gold['coverage']}x)")
        if quality < gold['quality']:
            reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{gold['quality']})")
        if length < gold['length']:
            reason.append(f"Short read length ({length}bp, expect >= {gold['length']} bp)")
        if contigs > gold['contigs']:
            reason.append(f"Too many contigs ({contigs}, expect <= {gold['contigs']})")
        rank = 'silver'
    elif coverage >= bronze['coverage'] and quality >= bronze['quality'] and length >= bronze['length'] and contigs <= bronze['contigs']:
        # Report which silver cutoffs were missed.
        if coverage < silver['coverage']:
            reason.append(f"Low coverage ({coverage:.2f}x, expect >= {silver['coverage']}x)")
        if quality < silver['quality']:
            reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{silver['quality']})")
        if length < silver['length']:
            reason.append(f"Short read length ({length}bp, expect >= {silver['length']} bp)")
        if contigs > silver['contigs']:
            reason.append(f"Too many contigs ({contigs}, expect <= {silver['contigs']})")
        if not is_paired:
            reason.append("Single-end reads")
        rank = 'bronze'

    if not rank:
        rank = 'exclude'
        # Report which bronze (minimum) cutoffs were missed.
        if coverage < bronze['coverage']:
            reason.append(f"Low coverage ({coverage:.2f}x, expect >= {bronze['coverage']}x)")
        if quality < bronze['quality']:
            reason.append(f"Poor read quality (Q{quality:.2f}, expect >= Q{bronze['quality']})")
        if length < bronze['length']:
            reason.append(f"Short read length ({length:.2f}bp, expect >= {bronze['length']} bp)")
        if contigs > bronze['contigs']:
            reason.append(f"Too many contigs ({contigs}, expect <= {bronze['contigs']})")

    # Assembled-size sanity checks (skipped when the cutoff is unset/0).
    # NOTE(review): placement relative to the exclude branch was ambiguous in
    # the original formatting; they are applied to every rank here.
    if cutoff['min-assembled-size']:
        if genome_size < cutoff['min-assembled-size']:
            reason.append(f"Assembled size is too small ({genome_size} bp, expect >= {cutoff['min-assembled-size']})")
    if cutoff['max-assembled-size']:
        # BUG FIX: the original used '<' here, so oversized assemblies were
        # never flagged.
        if genome_size > cutoff['max-assembled-size']:
            reason.append(f"Assembled size is too large ({genome_size} bp, expect <= {cutoff['max-assembled-size']})")

    reason = ";".join(sorted(reason))
    return [rank, reason]
2cd31199cb555c6bbbc4cec87e806ed4fcf6c983
683,086
def filter_empty_values(mapping_object: dict) -> dict:
    """Remove entries in the dict object where the value is `None`.

    >>> foobar = {'username': 'rafaelcaricio', 'team': None}
    >>> filter_empty_values(foobar)
    {'username': 'rafaelcaricio'}

    :param mapping_object: Dict object to be filtered
    """
    filtered = {}
    for key, value in mapping_object.items():
        if value is not None:
            filtered[key] = value
    return filtered
582e7874c96b261779f5a2d6b5a6e5a37b89ec81
683,091
def axis_ticklabels_overlap(labels):
    """Return a boolean for whether the list of ticklabels have overlaps.

    Parameters
    ----------
    labels : list of ticklabels

    Returns
    -------
    overlap : boolean
        True if any of the labels overlap.
    """
    if not labels:
        return False
    try:
        extents = [label.get_window_extent() for label in labels]
        overlap_counts = [extent.count_overlaps(extents) for extent in extents]
        # Every bbox overlaps itself, so > 1 means a genuine overlap.
        return max(overlap_counts) > 1
    except RuntimeError:
        # The macosx backend can raise here; treat as no overlap.
        return False
8b9edc2b97ae00976a573aefac5067568328ae05
683,092
def logreg_classifier_to_dict(classifier, feature_names=None):
    """
    Serialize sklearn logistic regression classifier

    Inspired by https://stackoverflow.com/questions/48328012/python-scikit-learn-to-json

    Parameters
    ----------
    classifier : sklearn.linear_model.LogisticRegression
        Logistic regression classifier to be serialized
    feature_names : list(str)
        Feature names of coefficients in classifier, must be in the same
        order as classifier.coef_

    Returns
    -------
    model : dict
        Serialized classifier
    """
    model = {
        "classifier_settings": classifier.get_params(),
        # Fitted arrays converted to plain lists for JSON-friendliness.
        "model_settings": {
            name: getattr(classifier, name).tolist()
            for name in ("classes_", "intercept_", "coef_", "n_iter_")
        },
    }
    if feature_names is not None:
        model["feature_names"] = feature_names
    return model
081330c53eb4dcded9b92c0ee9f878b4bf208b13
683,098
def add(x, y):
    """Add two values.

    :param x: first value to add
    :param y: second value to add
    :returns: x + y
    """
    total = x + y
    return total
421e7d34f7549235694cbdf4b9ec97021e06b46b
683,099
def remove_atom_maps(mol):
    """
    Iterates through the atoms in a Mol object and removes atom maps

    Parameters:
        mol (rdkit.Mol): Rdkit Mol Object

    Returns:
        mol (rdkit.Mol): Rdkit Mol Object without atom maps, or
        float('nan') if processing fails (best-effort contract preserved
        from the original).
    """
    try:
        for atom in mol.GetAtoms():
            atom.SetAtomMapNum(0)
        return mol
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; invalid/None mols still yield NaN.
        return float('nan')
213b155d5ed8142e7ca75d594ad3a8e296b44804
683,108