content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def tag_case(tag, uppercased, articles):
    """Normalize a tag to title case, removing periods.

    'U.S. bureau Of Geoinformation' -> 'US Bureau of Geoinformation'.
    Words found in `uppercased` (e.g. 'ugrc') are fully upper-cased,
    words found in `articles` ('of', 'in', ...) are lower-cased, and all
    other words - including hyphenated ones like 'water-related' - are
    title-cased. Note: no special-casing for an article at the start of
    the tag; it is lower-cased like any other article.

    tag: The single or multi-word tag to normalize
    uppercased: Lower-cased list of words that should be upper-cased
    articles: Lower-cased list of words that should stay lower-cased
    """
    def _fix(word):
        bare = word.replace('.', '')
        key = bare.lower()
        if key in uppercased:
            return bare.upper()
        if key in articles:
            return key
        return bare.title()

    return ' '.join(_fix(word) for word in tag.split())
2447453964d16db201fa63aa14053e5e2d25ebe7
686,707
def link_album(album_id):
    """Generates a link to an album

    Args:
        album_id: ID of the album

    Returns:
        The link to that album on Spotify
    """
    return "".join(["https://open.spotify.com/album/", album_id])
dbca10b94c22e46b494ed98152db5bba754ee28e
686,709
def fetch_seq_pygr(chr, start, end, strand, genome):
    """Fetch a genomic sequence by calling `pygr`.

    `genome` is initialized and stored outside the function.

    Parameters
    ----------
    chr : str
        chromosome name
    start : int
        start locus
    end : int
        end locus
    strand : str
        either '+' or '-'; '-' returns the reverse complement

    Returns
    -------
    seq : str
        genomic sequence, upper-cased
    """
    try:
        region = genome[chr][start:end]
        if strand == "-":
            #: pygr sequences support unary minus for reverse complement
            region = -region
    except Exception as e:
        raise Exception('pygr cannot fetch sequences; %s' % e)
    return str(region).upper()
9c104af76cc32d0f90437c4c9e9d389af7c75640
686,710
def compute_metrics(y_true, y_pred, metrics):
    """Evaluate each requested metric on the given predictions.

    Parameters
    ----------
    y_true: True values for y
    y_pred: Predicted values for y.
    metrics: mapping of metric name -> callable(y_true, y_pred)

    Returns
    -------
    dict
        Metric name mapped to its computed value.
    """
    results = {}
    for metric_name, metric_fn in metrics.items():
        results[metric_name] = metric_fn(y_true, y_pred)
    return results
19f07ead2c728ff86cc1b2879f4abe196a567575
686,713
def centerOfGravity( array ):
    """Compute the centroid of the given points.

    Parameters
    ----------
    array: list
        List of (x, y) points.

    Returns
    -------
    tuple
        The X and Y coordinates of the centroid.
    """
    # Sum X and Y coordinates over all points.
    total_x = sum(point[0] for point in array)
    total_y = sum(point[1] for point in array)
    count = len(array)
    # Return the averages.
    return total_x / count, total_y / count
a5ef7714b0ea97258530ba3d2d6d0483ca82623a
686,714
def rk4(y, x, dx, f):
    """Classic 4th-order Runge-Kutta step for dy/dx.

    y is the initial value for y
    x is the initial value for x
    dx is the step in x (e.g. the time step)
    f is a callable f(y, x) that you supply to compute dy/dx
    for the specified values.
    """
    # Four slope estimates: start, two midpoints, and the far end.
    k1 = dx * f(y, x)
    k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
    k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
    k4 = dx * f(y + k3, x + dx)
    # Weighted average of the slopes (midpoints count double).
    return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
73e83f0d3818f0db3b0a3c657d256e27c13bae1e
686,715
from typing import Any
from typing import Dict


def add_parser_options(options: Dict[str, Dict[str, Any]], parser_id: str, parser_options: Dict[str, Dict[str, Any]],
                       overwrite: bool = False):
    """Register `parser_options` under `parser_id` in `options`.

    :param options: the options structure to update (mutated in place)
    :param parser_id: key identifying the parser
    :param parser_options: the options to store for that parser
    :param overwrite: True to silently overwrite. Otherwise an error will be thrown
    :return: the updated `options` dictionary
    """
    already_registered = parser_id in options.keys()
    if already_registered and not overwrite:
        raise ValueError('There are already options in this dictionary for parser id ' + parser_id)
    options[parser_id] = parser_options
    return options
ad0e43abce2a7c41ac39da759e42f0b228cf5343
686,716
from typing import List


def distributed_axial(w: float, L: float, l1: float, l2: float) -> List[float]:
    """End reactions for a partially distributed axial load.

    Case 6 from Matrix Analysis of Framed Structures [Aslam Kassimali]
    """
    loaded_length = L - l1 - l2
    Fa = w / (2 * L) * loaded_length * (L - l1 + l2)
    Fb = w / (2 * L) * loaded_length * (L + l1 - l2)
    return [Fa, Fb]
18608713f411998e390e8af26fa01a9be499774b
686,720
from datetime import datetime


def parse_time(time_string):
    """Parse a time string from the config into a :class:`datetime.time` object."""
    #: Accepted formats, tried in order: 12-hour with AM/PM, then 24-hour.
    for fmt in ('%I:%M %p', '%H:%M', '%H:%M:%S'):
        try:
            parsed = datetime.strptime(time_string, fmt)
        except ValueError:
            continue
        return parsed.time()
    raise ValueError('invalid time `%s`' % time_string)
8ff237f980ed69c2293cd2893f56182ce1dea8b8
686,722
from typing import Dict


def format_link_object(url: str, text: str) -> Dict[str, str]:
    """Build a link dictionary object with 'href' and 'text' keys."""
    link = {"href": url, "text": text}
    return link
ca6f45f98f878517b8ff7795269a0084caeda252
686,727
def mul(args):
    """Multiply all the numbers in the list together.

    Args(list): A list of numbers
    Return(int or float): The running product (1 for an empty list)
    """
    product = 1
    for factor in args:
        product *= factor
    return product
38d650088195b9826a964511beef50108bf9bdff
686,732
import base64


def ddb_to_json(ddb_item):
    """Convert a DDB item to a JSON-compatible format.

    For now, this means encoding any binary ("B") fields as base64.

    Fix: the original used a shallow ``dict.copy()`` and then mutated the
    nested per-attribute type maps, silently corrupting the caller's item.
    The inner maps are now copied too, so the input is left untouched.

    :param ddb_item: DynamoDB item mapping attribute -> {type: value}
    :return: new dict with binary values base64-encoded as utf-8 strings
    """
    json_item = {}
    for attribute, type_map in ddb_item.items():
        #: copy the inner map so the caller's item is never mutated
        new_map = dict(type_map)
        if "B" in new_map:
            new_map["B"] = base64.b64encode(new_map["B"]).decode("utf-8")
        json_item[attribute] = new_map
    return json_item
bd24925e685db8c1096473ffbfcfb3329a4c55fc
686,733
import pwd


def valid_user(username):
    """
    Returns True if the username given is a valid username on the system
    """
    if not username:
        return False
    try:
        pwd.getpwnam(username)
    except KeyError:
        #: getpwnam raises KeyError when the entry isn't present
        return False
    return True
542764751abc0b353dbce53b4d6beeb2464496d5
686,734
import time


def datetime_to_timestamp(a_date):
    """Convert a datetime.datetime to a Unix timestamp.

    Microseconds are lost!

    :type a_date: datetime.datetime
    :rtype: float
    """
    local_struct = a_date.timetuple()
    return time.mktime(local_struct)
6bdddfdcff92bae2ad60a678f8001f2bb55c8706
686,735
import asyncio


async def check_address(host: str, port: int = 80, timeout: int = 2) -> bool:
    """Async test of whether an address (IP) is reachable.

    Fix: the original used a bare ``except:``, which also swallowed
    ``asyncio.CancelledError`` and ``KeyboardInterrupt`` - cancellation of
    the task would be silently converted into ``False``. Only connection
    failures and timeouts are caught now.

    Parameters:
        host: str
            host IP address or hostname
        port : int
            HTTP port number
        timeout : int
            seconds to wait before giving up

    Returns
    -------
    awaitable bool
    """
    try:
        reader, writer = await asyncio.wait_for(
            asyncio.open_connection(host, port), timeout=timeout
        )
        writer.close()
        await writer.wait_closed()
        return True
    except (OSError, asyncio.TimeoutError):
        #: unreachable host, refused connection, DNS failure, or timeout
        return False
e08da38636c66a59948adc4fa08132f9f7438db9
686,738
def convert_distance(val, old_scale="meter", new_scale="centimeter"):
    """Convert from a length scale to another one among meter,
    centimeter, inch, feet, and mile.

    Fixes: the docstring previously advertised ``NotImplementedError``
    while the code raises ``AttributeError`` (kept, since callers may
    catch it); the 'vm' typo for centimeter is corrected; the repeated
    ``.lower()`` calls are hoisted.

    Parameters
    ----------
    val: float or int
        Value of the length to be converted expressed in the original
        scale.
    old_scale: str
        Original scale. Supported scales are Meter ['Meter', 'meter',
        'm'], Centimeter ['Centimeter', 'centimeter', 'cm'], Inch
        ['Inch', 'inch', 'in'], Feet ['Feet', 'feet', 'ft'] or Mile
        ['Mile', 'mile', 'mil'].
    new_scale: str
        New scale, same supported values as `old_scale`.

    Raises
    -------
    AttributeError
        If either of the scales is not one of the supported ones.

    Returns
    -------
    res: float
        Value of the converted length expressed in the new scale.
    """
    old = old_scale.lower()
    # Convert from 'old_scale' to Meter
    if old in ['centimeter', 'cm']:
        temp = val / 100.0
    elif old in ['meter', 'm']:
        temp = val
    elif old in ['inch', 'in']:
        temp = val / 39.37008
    elif old in ['feet', 'ft']:
        temp = val / 3.28084
    elif old in ['mile', 'mil']:
        temp = 1609.344 * val
    else:
        raise AttributeError(
            f'{old_scale} is unsupported. m, cm, ft, in and mile are supported')
    new = new_scale.lower()
    # and from Meter to 'new_scale'
    if new in ['centimeter', 'cm']:
        result = 100 * temp
    elif new in ['meter', 'm']:
        result = temp
    elif new in ['inch', 'in']:
        result = 39.37008 * temp
    elif new in ['feet', 'ft']:
        result = 3.28084 * temp
    elif new in ['mile', 'mil']:
        result = temp / 1609.344
    else:
        raise AttributeError(
            f'{new_scale} is unsupported. m, cm, ft, in and mile are supported')
    return result
46cac6149753a2231e040c2507b71bbc23a3f607
686,740
def get_cell_connection(cell, pin):
    """
    Returns the name of the net connected to the given cell pin.
    Returns None if unconnected
    """
    # Only for subckt
    assert cell["type"] == "subckt"

    #: args[0] is the subckt name; the rest are "pin=net" assignments
    for assignment in cell["args"][1:]:
        name, net = assignment.split("=")
        if name == pin:
            return net

    # Not found
    return None
3fd2b9bb5df7818d3cd47323890d210ff88eb27a
686,741
import re


def nest(text, nest):
    """
    Indent documentation block for nesting.

    Args:
        text (str): Documentation body.
        nest (int): Nesting level. For each level, the final block is indented
            one level. Useful for (e.g.) declaring structure members.

    Returns:
        str: Indented reST documentation string.
    """
    indent = ' ' * nest
    #: (?m)^(?!$) matches the start of every non-empty line
    return re.sub('(?m)^(?!$)', indent, text)
a985409bae7177295368ad472c63ba0ea972375d
686,743
from typing import Union
import time


def time_format(seconds: float, format_='%H:%M:%S') -> Union[str, float]:
    """Format a second count as a clock string; NaN passes through.

    Default format is '%H:%M:%S'

    >>> time_format(3600)
    '01:00:00'
    """
    #: NaN fails both comparisons, so it falls through and is returned as-is
    if not (seconds >= 0 or seconds < 0):
        return seconds
    formatted = time.strftime(format_, time.gmtime(abs(seconds)))
    return f"-{formatted}" if seconds < 0 else formatted
41116e13c2e93255b0e2512a9fad69bca020ed69
686,744
def validate_identifier(key: str) -> bool:
    """Check if key starts with an alphabetic or underscore character.

    Fix: the original raised ``IndexError`` on an empty or
    whitespace-only key (``key[0]`` on an empty string); such keys now
    return ``False``.

    :param key: candidate identifier (surrounding whitespace is ignored)
    :return: True when the stripped key starts with a letter or '_'
    """
    key = key.strip()
    if not key:
        return False
    return key[0].isalpha() or key.startswith("_")
8eb04927e53a71a3e1f9475503e44d034dd3350f
686,746
import struct def _single_byte_from_hex_string(h): """Return a byte equal to the input hex string.""" try: i = int(h, 16) if i < 0: raise ValueError except ValueError: raise ValueError("Can't convert hex string to a single byte") if len(h) > 2: raise ValueError("Hex string can't be more than one byte in size") if len(h) == 2: return struct.pack("B", i) elif len(h) == 1: return struct.pack("B", i << 4)
59a08cb8dafc96e4ee0234ef14c8ca849595aa50
686,748
def mean(num_list):
    """
    Computes the mean of a list of numbers

    Parameters
    ----------
    num_list : list
        List of numbers to calculate the mean of

    Returns
    -------
    mean : float
        Mean value of the num_list
    """
    # Check that input is of type list
    if not isinstance(num_list, list):
        raise TypeError('Invalid input %s - must be type list' % (num_list))

    # Check that the list is not empty
    if len(num_list) == 0:
        raise ValueError("Num list is empty")

    #: 0.0 start keeps float accumulation identical to the original loop
    return sum(num_list, 0.0) / len(num_list)
7932b52a44515ec118f67365609b979452aee0eb
686,749
import csv


def get_rows(input_tsv):
    """Parse an input TSV into a list of rows.

    Fix: the original opened the file without ever closing it (the handle
    leaked until garbage collection) and returned a lazy reader bound to
    that open handle. The file is now read eagerly inside a context
    manager; the returned list iterates exactly like the old reader.

    Keyword arguments:
    input_tsv -- input TSV containing study metadata

    Returns:
    list of rows (each row is a list of column strings)
    """
    with open(input_tsv, 'r') as tsv_file:
        return list(csv.reader(tsv_file, delimiter='\t'))
f2dffa7bcf7f0f4a1e3b48fc443ae489d06429cd
686,752
def get_queue(conn, queue_name):
    """
    Look up the queue with the given name on the AWS connection.

    NOTE(review): the backend's get_queue presumably creates the queue
    when missing, as the original doc claimed - confirm against the
    connection implementation.
    """
    queue = conn.get_queue(queue_name)
    return queue
3984ef179cfc74336067e217795308ed4768e736
686,756
def check_reversibility(reaction):
    """Classify a reaction by its flux bounds.

    Return:
    - *forward* if the reaction is irreversible in the forward direction
    - *backward* if the reaction is irreversible in the backward (reverse)
      direction
    - *reversible* if the reaction is reversible
    - *blocked* if the reaction is blocked
    """
    lower = reaction.lower_bound
    upper = reaction.upper_bound
    if lower == 0 and upper == 0:
        return "blocked"
    if lower == 0 and upper > 0:
        return "forward"
    if lower < 0 and upper == 0:
        return "backward"
    return "reversible"
8a3047f85a640457e39762f22916ad6c8d47d183
686,759
def summ_nsqr(i1, i2):
    """Return the summation of n^2 for n from i1 to i2 (inclusive)."""
    def _cumulative(k):
        #: closed form for sum of squares 1..k
        return (k * (k + 1) * (2*k + 1)) / 6
    return _cumulative(i2) - _cumulative(i1 - 1)
bcd2263be6b5698f2997e0daac962c207e318fd0
686,765
def _cost_to_qubo(cost) -> dict: """Get the the Q matrix corresponding to the given cost. :param cost: cost :return: QUBO matrix """ model = cost.compile() Q = model.to_qubo()[0] return Q
4b0691335a82ca8a32033619247b50c465dfbe9d
686,766
def base_count(DNA):
    """Count occurrences of the nucleotides A, T, G, C (in that order)."""
    return tuple(DNA.count(base) for base in "ATGC")
64fb081fc5f510b3d55b4afb0c1f9c8b6ee89fb9
686,772
def alphabetical_value(name):
    """
    Sum of letter positions (A=1 ... Z=26) of an upper-case name,
    as described in the problem statement.
    """
    #: ord('A') == 65, so subtracting 64 maps 'A' -> 1
    return sum(ord(letter) - 64 for letter in str(name))
0f98fb1efc79f9ca29b87b1d5e0de35412f41ee2
686,773
def GetFilters(user_agent_string, js_user_agent_string=None,
               js_user_agent_family=None,
               js_user_agent_v1=None,
               js_user_agent_v2=None,
               js_user_agent_v3=None):
    """Return the optional arguments that should be saved and used to query.

    js_user_agent_string is always returned if it is present. We really only
    need it for Chrome Frame. However, I added it in the general case to
    find other cases when it is different.

    Args:
        user_agent_string: The full user-agent string.
        js_user_agent_string: JavaScript ua string from client-side
        js_user_agent_family: Override for the family name to deal with
            the fact that IE platform preview (for instance) cannot be
            distinguished by user_agent_string, but only in javascript.
        js_user_agent_v1: v1 override - see above.
        js_user_agent_v2: v2 override - see above.
        js_user_agent_v3: v3 override - see above.
    Returns:
        {js_user_agent_string: '[...]', js_family_name: '[...]', etc...}
    """
    candidates = {
        'js_user_agent_string': js_user_agent_string,
        'js_user_agent_family': js_user_agent_family,
        'js_user_agent_v1': js_user_agent_v1,
        'js_user_agent_v2': js_user_agent_v2,
        'js_user_agent_v3': js_user_agent_v3,
    }
    #: keep only the overrides that were actually supplied
    return {key: value for key, value in candidates.items()
            if value is not None and value != ''}
9e924d9311a9838cf2e09cd11ea7f3e957466610
686,774
def filter_api_changed(record):
    """Filter out LogRecords for requests that poll for changes."""
    is_poll_request = record.msg.endswith('api/changed/ HTTP/1.1" 200 -')
    return not is_poll_request
caa93f19ce00238786ae0c1687b7e34994b73260
686,775
def apply_decorators(decorators):
    """Apply multiple decorators to the same function.

    Useful for reusing common decorators among many functions. Decorators
    are applied in reverse list order, matching how stacked @-decorators
    compose.
    """
    def wrapper(func):
        decorated = func
        for decorator in reversed(decorators):
            decorated = decorator(decorated)
        return decorated
    return wrapper
acd1f6af5eb757aeb3e707e84d1893ebf049e2f0
686,776
def turn_weight_function_distance(v, u, e, pred_node):
    """
    Weight function used in a modified version of the Dijkstra path
    algorithm. Weight is the sum of the edge length weight and the turn
    length weight (turn length weight keyed by predecessor node).
    This version of the function takes edge lengths keyed with 'distance'.

    Fixes: ``pred_node == None`` replaced with the ``is None`` identity
    check; the duplicated ``e[0]['distance']`` lookup is hoisted.

    Args:
        v (var): edge start node
        u (var): edge end node
        e (dict): edge attribute dictionary with keys
        pred_node (var): predecessor node, or None at the path start

    Returns:
        calculated edge weight (float)
    """
    weight = e[0]['distance']
    if pred_node is not None:
        weight += e[0]['turn_length'][pred_node]
    return weight
c463f314877b9a40b428fcc9d4e26fae4eacccc6
686,777
import string


def LazyFormat(s, *args, **kwargs):
    """Format a string, allowing unresolved parameters to remain unresolved.

    Args:
        s: str, The string to format.
        *args: [str], A list of strings for numerical parameters.
        **kwargs: {str:str}, A dict of strings for named parameters.

    Returns:
        str, The lazily-formatted string.
    """
    class _KeepUnresolved(dict):
        #: missing keys are re-emitted as literal '{key}' placeholders
        def __missing__(self, key):
            return '{' + key + '}'

    return string.Formatter().vformat(s, args, _KeepUnresolved(kwargs))
53039d096b8600a11d5220c47c51ee36fe4e3eb9
686,779
def find_gcd(number1: int, number2: int) -> int:
    """Returns the greatest common divisor of number1 and number2."""
    #: iterative Euclidean algorithm (the original recursed)
    while (remainder := number1 % number2) != 0:
        number1, number2 = number2, remainder
    return number2
966270f6299ebf2f95dfd0d867ce29ed8bb5ee89
686,784
def rect_area(r):
    """Return the area of a rectangle.

    Args:
        r: an object with attributes left, top, width, height

    Returns:
        float
    """
    width = float(r.width)
    height = float(r.height)
    return width * height
bd78896dcfe1b128602218d961162efb23f1a612
686,787
def ascii2int(str, base=10, int=int): """ Convert a string to an integer. Works like int() except that in case of an error no exception raised but 0 is returned; this makes it useful in conjunction with map(). """ try: return int(str, base) except: return 0
c797e8d93dbbb653d738fc506577395931036cab
686,789
def create_chord_progression(a_train=False):
    """Creates a midi chord progression

    Args:
        a_train (bool, optional): Defaults to False. If True, returns the
            chord progression for Take the A Train by Duke Ellington.
            Otherwise, returns a standard 12-bar blues in C major.

    Returns:
        (int, int)[]: chord progression as a list of (chord_root, dur)
    """
    DURATION = 32
    if not a_train:
        #: 12-bar blues: I I I I | IV IV I I | V IV I I
        roots = [1]*4 + [4]*2 + [1]*2 + [5] + [4] + [1]*2
    else:
        #: 16 measures: the A section played twice
        a_section = [1]*2 + [2]*2 + [3] + [4] + [1]*2
        #: 8-measure bridge
        bridge = [5]*4 + [4]*2 + [3] + [6]
        #: final 8 measures
        closing = [1]*2 + [2]*2 + [3] + [6] + [1]*2
        roots = a_section * 2 + bridge + closing
    return [(root, DURATION) for root in roots]
e187f06960292d915025063d56001579ef5cbb90
686,792
import itertools


def subsets(parent_set, include_empty=False, include_self=False):
    """
    Given a set of variable names, return all subsets

    :param parent_set: a set, or tuple, of variables
    :param include_empty: also include the empty tuple
    :param include_self: also include the full (sorted) tuple itself
    :return: a set of tuples, each one denoting a subset
    """
    items = list(parent_set)
    result = set()
    #: proper non-empty subsets: sizes 1 .. n-1
    for size in range(1, len(items)):
        result.update(tuple(sorted(combo))
                      for combo in itertools.combinations(items, size))
    if include_empty:
        result.add(tuple())
    if include_self:
        result.add(tuple(sorted(items)))
    return result
ca3dcbf620f8dc29e1f0a14ec4bf74abdbdeacd1
686,793
from typing import Any from typing import Optional def _guess_crs_str(crs_spec: Any)->Optional[str]: """ Returns a string representation of the crs spec. Returns `None` if it does not understand the spec. """ if isinstance(crs_spec, str): return crs_spec if hasattr(crs_spec, 'to_epsg'): epsg = crs_spec.to_epsg() if epsg is not None: return 'EPSG:{}'.format(crs_spec.to_epsg()) if hasattr(crs_spec, 'to_wkt'): return crs_spec.to_wkt() return None
11328ea9d1cc955faa63c37a64d115e8252b0c57
686,799
from typing import Iterable
from typing import List


def unique_by(pairs: Iterable[tuple]) -> List:
    """Return a list of items with distinct keys. pairs yields (item, key).

    Later pairs win when keys repeat; key insertion order is preserved.
    """
    # a list rather than a set, because items might not be hashable
    latest_by_key = {}
    for item, key in pairs:
        latest_by_key[key] = item
    return list(latest_by_key.values())
b88734701dcb46532e4a40695e56934bd24e03dd
686,801
def area_of_polygon(x, y):
    """Calculates the area of an arbitrary polygon given its vertices
    (shoelace formula)."""
    total = 0.0
    for j in range(len(x)):
        #: negative indices wrap around, closing the polygon
        total += x[j - 1] * (y[j] - y[j - 2])
    return abs(total) / 2.0
003a7ffde3c1016113da0129583e92674eef8556
686,802
def _set_default_max_rated_temperature(subcategory_id: int) -> float: """Set the default maximum rated temperature. :param subcategory_id: the subcategory ID of the inductive device with missing defaults. :return: _rated_temperature_max :rtype: float """ return 130.0 if subcategory_id == 1 else 125.0
f2f86d899b9404622b3c01f89416c7e0c8cab393
686,808
def SimpleMaxMLECheck(BasinDF):
    """
    This function checks through the basin dataframe and returns a dict
    with the basin key and the best fit m/n

    Fixes: the original ``del``-ed two columns from the caller's
    dataframe (mutating shared state) and printed debug output; the
    column removal is now done on a copy via ``drop`` and the prints are
    removed.

    Args:
        BasinDF: pandas dataframe from the basin csv file.

    Returns:
        m_over_n_dict: dictionary with the best fit m/n for each basin,
        the key is the basin key and the value is the best fit m/n

    Author: FJC
    """
    basin_keys = list(BasinDF['basin_key'])

    # drop the two bookkeeping columns without touching the caller's DF
    mle_df = BasinDF.drop(columns=['basin_key', 'outlet_jn'])

    # the column label of the max MLE in each row ends with the m/n value
    MOverNs = [float(label.split()[-1]) for label in mle_df.idxmax(axis=1)]

    return dict(zip(basin_keys, MOverNs))
db833f1f7d5fbe140ed0dc92ef99dc7ff138523c
686,812
import re


def get_pkgver(pkginfo):
    """Extracts the APK version from pkginfo; returns string.

    Fixes: the bare ``except:`` (which swallowed every error, including
    interrupts) is replaced by an explicit empty-match check, and the
    pattern is a raw string.

    :param pkginfo: aapt-style dump text containing versionName='...'
    :return: version with whitespace and '/' stripped, or "None" when
        no versionName field is present
    """
    matches = re.findall(r"versionName='(.*?)'", pkginfo)
    if not matches:
        return "None"
    return "".join(matches[0].split()).replace("/", "")
872c2acb284d9fc3165bb84e1b10ebb46e827738
686,815
def getBBox(df, cam_key, frame, fid):
    """
    Returns the bounding box of a given fish in a given frame/cam view.

    Input:
        df: Dataset Pandas dataframe
        cam_key: String designating the camera view (cam1 or cam2)
        frame: int indicating the frame which should be looked up
        fid: int indicating the fish ID

    Output:
        Tuple containing the top left x/y coordinate arrays and the
        bottom right x/y coordinate arrays
    """
    mask = (df["id"] == fid) & (df[f"{cam_key}_frame"] == frame)
    selected = df[mask]
    return (selected[f"{cam_key}_tl_x"].values,
            selected[f"{cam_key}_tl_y"].values,
            selected[f"{cam_key}_br_x"].values,
            selected[f"{cam_key}_br_y"].values)
dcbe5f8cc17c5913d712f543d01c5e8b71f741e4
686,816
def constituent_copyin_subname(host_model):
    """Return the name of the subroutine to copy constituent fields to the
    host model. Because this is a user interface API function, the name
    is fixed."""
    return f"{host_model.name}_ccpp_gather_constituents"
b752b590127a0421caa531daccc4fbdf61728abe
686,817
import codecs


def check_encoding(stream, encoding):
    """Test whether the encoding of `stream` matches `encoding`.

    Returns

    :None:  if `encoding` or `stream.encoding` are not a valid encoding
            argument (e.g. ``None``) or `stream.encoding` is missing.
    :True:  if the encoding argument resolves to the same value as
            `encoding`,
    :False: if the encodings differ.

    Fix: the original accessed ``stream.encoding`` unguarded, so a stream
    without an ``encoding`` attribute raised AttributeError instead of
    returning None as documented; ``getattr`` with a default and an
    ``is None`` check restore the documented contract.
    """
    stream_encoding = getattr(stream, 'encoding', None)
    if stream_encoding is None:
        return None
    try:
        return codecs.lookup(stream_encoding) == codecs.lookup(encoding)
    except (LookupError, AttributeError, TypeError):
        return None
d9957826e34ec55476fcf101ca013e206582cb33
686,820
def _list_distance(list1, list2, metric): """ Calculate distance between two lists of the same length according to a given metric. Assumes that lists are of the same length. Args: list1 (list): first input list. list2 (list): second input list. metric (str): 'identity' counts identical positions, 'euclid' calculates the Euclidean distance (L2 norm), 'taxi' calculates the taxicab (Manhattan) distance (L1 norm), 'sup' returns maximum distance between positions, 'inf' returns minimum distance between positions. Returns distance between the two lists. Raises: ValueError if unsupported distance metric used. """ # Equal length of both lists is assumed distance = 0 if metric == 'taxi': distance = sum([abs(list1[pos] - list2[pos]) for pos in range(len(list1))]) return distance elif metric == 'euclid': distance = sum([(list1[pos] - list2[pos]) ** 2 for pos in range(len(list1))]) return distance ** 0.5 elif metric == 'identity': distance = sum([list1[pos] != list2[pos] for pos in range(len(list1))]) return distance elif metric == 'sup': distance = max([abs(list1[pos] - list2[pos]) for pos in range(len(list1))]) return distance elif metric == 'inf': distance = min([abs(list1[pos] - list2[pos]) for pos in range(len(list1))]) return distance else: raise ValueError("Proper distance calculation metrics are \'taxi\',\ \'euclid\', \'identity\', \'sup\', or \'inf\'.")
bd8b98a13d070b0123dc20792abd8485ae247082
686,823
import torch
from typing import Type


def count_module_instances(model: torch.nn.Module, module_class: Type[torch.nn.Module]) -> int:
    """
    Counts the number of instances of module_class in the model,
    recursing into nested submodules.

    Args:
        model (torch.nn.Module): Source model
        module_class (Type[torch.nn.Module]): module_class to count.
            Can also be a tuple of classes.

    Returns:
        int: The number of instances of `module_class` in `model`
    """
    total = 0
    for _name, child in model.named_children():
        total += int(isinstance(child, module_class))
        total += count_module_instances(child, module_class)
    return total
28a4c8914fca34be8562802d2c337804cc3690d1
686,824
def get_all_layers(model):
    """
    Get all layers of model, including ones inside a nested model,
    flattened into a single list.
    """
    collected = []
    for layer in model.layers:
        if hasattr(layer, 'layers'):
            #: nested model: recurse and splice its leaves in
            collected.extend(get_all_layers(layer))
        else:
            collected.append(layer)
    return collected
885159b8d91a53caed55be08503f6738bf114eeb
686,825
def escape_markdown(raw_string: str) -> str:
    """Returns a new string which escapes all markdown metacharacters.

    Args
    ----
    raw_string : str
        A string, possibly with markdown metacharacters, e.g. "1 * 2"

    Returns
    -------
    A string with all metacharacters escaped.

    Examples
    --------
    ::
        escape_markdown("1 * 2") -> "1 \\* 2"
    """
    #: single-pass translation; equivalent to the sequential replaces
    #: because the backslash itself is in the table, so freshly inserted
    #: escapes are never re-escaped
    table = str.maketrans({ch: "\\" + ch for ch in "\\*-=`!#|"})
    return raw_string.translate(table)
6e0518dcfe9a09a1be5846bd7da92ffccf2f6368
686,826
import csv


def detect_csv_sep(filename: str) -> str:
    """Detect the separator used in a raw source CSV file.

    Fix: the open() call had a broken hard-coded path literal and
    ignored the ``filename`` parameter entirely; the path is now built
    from the argument. The redundant seek(0) and the pre-initialized
    ``sep`` variable are removed.

    :param filename: The name of the raw CSV in the raw/ directory
    :type filename: str
    :return: The separator string
    :rtype: str
    """
    with open(f'raw/{filename}', "r") as csv_file:
        dialect = csv.Sniffer().sniff(csv_file.read(1024))
    return dialect.delimiter
ad7b26dfd5194c26bd3b32d0bcb3590410a121d2
686,829
def get_restraints(contact_f):
    """Parse a contact file and retrieve the restraints.

    Returns a dict mapping chain ID -> list of unique residue numbers,
    in first-seen order.
    """
    restraint_dic = {}
    with open(contact_f) as handle:
        for line in handle:
            if not line:  # could be empty
                continue
            fields = line.split()
            res_i, chain_i = int(fields[0]), fields[1]
            res_j, chain_j = int(fields[3]), fields[4]
            for chain, res in ((chain_i, res_i), (chain_j, res_j)):
                residues = restraint_dic.setdefault(chain, [])
                if res not in residues:
                    residues.append(res)
    return restraint_dic
e61c825d25dc9fb50f10bd8091f98dd997d278cc
686,831
def easy_name(s):
    """Replace ':' with '_' so column names contain no special characters."""
    return '_'.join(s.split(':'))
7ce1ecaa22b84aa38f86d091740b5d669231ca90
686,832
def skymapweights_keys(self):
    """
    Return the list of string names of valid weight attributes.

    For unpolarized weights, only TT is included. Otherwise all six
    unique weight attributes are returned in row-major order:
    TT, TQ, TU, QQ, QU, UU.
    """
    if not self.polarized:
        return ['TT']
    return ['TT', 'TQ', 'TU', 'QQ', 'QU', 'UU']
f7e69f39abfbb35bd12b44a838ce76f3145cdfe7
686,834
def one_lvl_colnames(df,cols,tickers): """This function changes a multi-level column indexation into a one level column indexation Inputs: ------- df (pandas Dataframe): dataframe with the columns whose indexation will be flattened. tickers (list|string): list/string with the tickers (s) in the data frame df. cols (list|string): list/string with the name of the columns (e.g. 'Adj Close', 'High', 'Close', etc.) that are in the dataframe df. Ouputs: ------- df (pandas Dataframe): dataframe with the same information as df, but with one level of indexation. """ # Define important variables: if isinstance(tickers, str): tickers = [tickers] if isinstance(cols, str): cols = [cols] # For multi-level column indexing: if isinstance(df.columns.values[0], tuple): # Define important varibles columns = df.columns.values new_cols = [] # Itarate through the multi-level column names and flatten them: for col in columns: temp = [] for name in col: if name != '': temp.append(name) new_temp = '_'.join(temp) new_cols.append(new_temp) # Change the column names: df.columns = new_cols # For uni-level colum indexing: elif isinstance(df.columns.values[0], str): # Define new names: col_names = [column+'_'+ticker for column in cols\ for ticker in tickers] df.columns = col_names return df
2252916dd6199a4c87345fedb7bdf5103d2363df
686,835
def has_imm(opcode):
    """Returns True if the opcode has an immediate operand (bit 3 set)."""
    return (opcode & 0b1000) != 0
0aa7315f7a9d53db809ce1340c524400ead60799
686,836
from typing import Iterator


def second(itr: Iterator[float]) -> float:
    """Returns the second item in an iterator, consuming the first."""
    _ = next(itr)  # discard the first item
    return next(itr)
74e85436ed9b763f262c94e3898455bd24d75028
686,837
def _run_subsuite(args): """ Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. This helper lives at module-level and its arguments are wrapped in a tuple because of the multiprocessing module's requirements. """ runner_class, subsuite_index, subsuite, failfast = args runner = runner_class(failfast=failfast) result = runner.run(subsuite) return subsuite_index, result.events
a51b0d7c3abed528ceaf3cbe16059e6e816074c6
686,838
from typing import List
import re


def parse_seasons(seasons: List[str]):
    """Parses a list of season strings into a list of single years.

    Used by main and by collect_course_length main.

    Fixes: ``re.match`` only anchors at the start, so inputs with
    trailing garbage (e.g. '20201') were silently accepted; ``fullmatch``
    now rejects them. The patterns are raw strings (the old "\\-" was an
    invalid escape) and the redundant '-' escape is dropped.

    :param seasons: entries that are either 'YYYY' or a 'YYYY-YYYY' range
    :return: list of 4-digit year strings, ranges expanded inclusively
    :raises ValueError: for any entry that is not a valid year or range
    """
    parsed_seasons = []
    for season in seasons:
        if re.fullmatch(r"[0-9]{4}-[0-9]{4}", season):
            start, end = map(int, season.split("-"))
            # end + 1 so the range is inclusive
            parsed_seasons.extend(str(year) for year in range(start, end + 1))
        elif re.fullmatch(r"[0-9]{4}", season):
            parsed_seasons.append(season)
        else:
            raise ValueError("The season must be a valid 4-digit year")
    return parsed_seasons
9dbc63cab1a0839360384aea8c4cdec88605d767
686,840
def _copy_dict(dct, description): """Return a copy of `dct` after overwriting the `description`""" _dct = dct.copy() _dct['description'] = description return _dct
557bc7da87069846c088983228079d3c762af69c
686,842
def poly_from_box(W, E, N, S):
    """
    Helper function to create a counter-clockwise polygon
    from the given box. N, E, S, W are the extents of the box.
    """
    top_left = [W, N]
    bottom_left = [W, S]
    bottom_right = [E, S]
    top_right = [E, N]
    return [top_left, bottom_left, bottom_right, top_right]
ed1bf13dfc2e9eb405789d98182338d576178124
686,843
def valfilterfalse(predicate, d, factory=dict):
    """ Filter items in dictionary by values which are false.

    >>> iseven = lambda x: x % 2 == 0
    >>> d = {1: 2, 2: 3, 3: 4, 4: 5}
    >>> valfilterfalse(iseven, d)
    {2: 3, 4: 5}

    See Also:
        valfilter
    """
    result = factory()
    for key, value in d.items():
        if predicate(value):
            continue
        result[key] = value
    return result
d10f29e97946580f5f49dc928b2402c863c1617f
686,844
def yxbounds(shape1, shape2):
    """Bounds on the relative position of two arrays such that they overlap.

    Given the shapes of two arrays (second array smaller) return the range
    of allowed center offsets such that the second array is wholly contained
    in the first.

    Parameters
    ----------
    shape1 : tuple
        Shape of larger array.
    shape2 : tuple
        Shape of smaller array.

    Returns
    -------
    ybounds : tuple
        Length 2 tuple giving ymin, ymax.
    xbounds : tuple
        Length 2 tuple giving xmin, xmax.

    Examples
    --------
    >>> yxbounds((32, 32), (15, 15))
    (-8.5, 8.5), (-8.5, 8.5)
    """
    half_dy = (shape1[0] - shape2[0]) / 2.0
    half_dx = (shape1[1] - shape2[1]) / 2.0
    return (-half_dy, half_dy), (-half_dx, half_dx)
57cbba112224c87571d2e0ad7558946658f4b04d
686,845
def parse_spec(spec, default_module):
    """Parse a spec of the form module.class:kw1=val,kw2=val.

    Returns a tuple of module, classname, positional-argument list and
    keyword dict.
    """
    name, _, args = spec.partition(':')
    if '.' in name:
        module, klass = name.rsplit('.', 1)
    elif default_module:
        module, klass = default_module, name
    else:
        module, klass = name, None
    pieces = [piece for piece in args.split(',') if piece]
    positional = [piece for piece in pieces if '=' not in piece]
    keywords = dict(piece.split('=', 1) for piece in pieces if '=' in piece)
    return module, klass, positional, keywords
5ea1c05488e77e1c7dd76ed2ae332dbea460f0ff
686,851
def handle_returning_date_to_string(date_obj):
    """Return a date as a string.

    Strings are passed through untouched; datetime-like objects are
    converted with their isoformat() method.
    """
    return date_obj if isinstance(date_obj, str) else date_obj.isoformat()
e1219cac077f29683ca1c7edbc12920037dd4bb6
686,857
def getAtomNames(values):
    """
    Assign to each value an atom name: O for negatives, H for 0 and N for
    positives. This function is created to assign atom names for custom pdbs.

    Unlike the original version (which shifted the input in place with
    ``values += 1``, mutating the caller's numpy array and crashing on plain
    lists), the input is left untouched and any iterable of numbers works.

    :param values: Collection of numbers in {-1, 0, 1} to assign a name for
    :type values: iterable
    :returns: list -- List of atom names
    """
    names = ["O", "H", "N"]
    # Shift each value by one so it indexes names: -1 -> 0, 0 -> 1, 1 -> 2
    return [names[int(value) + 1] for value in values]
63f0533ef9cc18467438ca5c48513992283d521a
686,859
def event_loop_settings(auto_launch=True):
    """Settings for pyMOR's MPI event loop.

    Parameters
    ----------
    auto_launch
        If `True`, automatically execute :func:`event_loop` on all MPI ranks
        (except 0) when pyMOR is imported.
    """
    return dict(auto_launch=auto_launch)
9fc1565c093cb85d2019a2453df998aa0e54c598
686,860
def mate_in_region(aln, regtup):
    """
    Check whether an alignment's mate falls within a region.

    Args:
        aln (:obj:`pysam.AlignedSegment`): An aligned segment
        regtup (:tuple: (chrom, start, end)): Region, or None

    Returns:
        bool: True if the mate is inside the region, or if regtup is None
    """
    if regtup is None:
        return True
    chrom, start, end = regtup
    on_same_chrom = aln.next_reference_id == chrom
    return on_same_chrom and start < aln.next_reference_start < end
969acb9ce356bf0a20b381ffe494705a3de2b5e2
686,862
def format_query(query, params=None):
    """
    Replace "{foo}" placeholders in query with values from params.
    Works just like Python str.format.

    :type query str
    :type params dict
    :rtype: str
    """
    for key, value in (params or {}).items():
        query = query.replace('{%s}' % key, str(value))
    return query
cd4c1d42f139a321980c329cdb45953519bd3164
686,864
def rename_dupe_cols(cols):
    """
    Renames duplicate columns in order of occurrence.

    columns [0, 0, 0, 0] turn into [0, 1, 2, 3]
    columns [name10, name10, name10, name10] turn into
    [name10, name11, name12, name13]

    :param cols: iterable of columns
    :return: list of unique column names with digits incremented.
    """
    cols = [str(c) for c in cols]
    new_cols = []
    for _ in range(len(cols)):
        c = cols.pop(0)
        # Bump the embedded number until the name no longer collides with
        # one already emitted.
        while c in new_cols:
            try:
                num = int(''.join(x for x in c if x.isdigit()))
                c = c.replace(str(num), str(num + 1))
            except ValueError:
                # int('') — the name has no digits: start a suffix at 2.
                # (The original used a bare `except:`, hiding real errors.)
                c = c + str(2)
        new_cols.append(c)
    return new_cols
18e400609581c280f6a89babf5fd1f98ccab40a1
686,865
def find_all(soup, first, second, third):
    """
    A simpler wrapper around BeautifulSoup.find_all.

    :param soup: A BeautifulSoup object
    :param first: The tag to search for. Example: div
    :param second: The attribute key to match. Example: class
    :param third: The attribute value to match. Example: <class-name>

    Example: BeautifulSoup(<url>, <parser>).find_all("div",
    {"class": <"classname">}) is simplified with this method by using:
    results = common.find_all(soup_obj, "div", "class", "<classname>")

    :return: a list of items found by the search, or None on error.
    """
    try:
        results = soup.find_all(first, {second: third})
    except Exception as error:
        print("There was an error trying to find all", error)
        return None
    if not results:
        print("Didn't find anything!")
    return results
df3109571e9a11710a1ecc3f7b835ca019229a24
686,866
def add_suffix_to_parameter_set(parameters, suffix, divider='__'):
    """
    Adds a suffix ('__suffix') to the keys of a dictionary of MyTardis
    parameters. Returns a copy of the dict with suffixes on keys.
    (eg to prevent name clashes with identical parameters at the
    Run Experiment level)
    """
    return {
        u'%s%s%s' % (param['name'], divider, suffix): param['value']
        for param in parameters
    }
39c184a6d836270da873b4c2e8c33e9a1d29f073
686,867
def value_to_string(ast):
    """Strip the surrounding quotes from the node's string value in place."""
    raw = ast.value
    # Triple quotes ("""...""" or '''...''') are three characters per side;
    # single/double quotes are one.
    width = 3 if raw[:3] in ('"""', "'''") else 1
    ast.value = raw[width:-width]
    return ast
01308b44f404dd68b0e5fdd4c5cfa02a0e19ed3f
686,868
def module_of(object):
    """Returns the name of the module defining `object`, if possible.

    `module_of` works for classes, functions, and class proxies.
    """
    try:
        # Class proxies store the wrapped class under "Essence".
        object = object.__dict__["Essence"]
    except (AttributeError, KeyError, TypeError):
        pass
    result = getattr(object, "__module__", None)
    if not result:
        # Fall back to the function's globals. Python 3 spells this
        # `__globals__`; `func_globals` (the original, Python-2-only
        # spelling) is kept as a last resort for legacy objects.
        globals = getattr(object, "__globals__", None) or \
            getattr(object, "func_globals", None)
        if globals:
            result = globals.get("__name__")
    return result
0adc87b309d466ba1f5f1f78ad50379397528fb2
686,869
import itertools


def nQubit_Meas(n):
    """
    Generate the list of measurement operators corresponding to the
    [X, Y, Z]^n Pauli group.

    Input:
        n (int): Number of qubits to perform tomography over
    Returns:
        (list): measurement operators for all combinations of the
        n-qubit Pauli group for QST or QPT
    """
    # Pulse in [X, Y, Z] order
    single_qubit_ops = ('X', 'Y', 'Z')
    return [combo for combo in itertools.product(single_qubit_ops, repeat=n)]
ff3ed6a75d160d139e3192d8d5255d4afa2930a8
686,870
import copy


def make_all_strokes_dashed(svg, unfilled=False):
    """
    Make every stroke in the SVG dashed.

    :param svg: The SVG, in xml.etree.ElementTree format
    :param unfilled: Whether this is an unfilled symbol
    :return: The resulting SVG
    """
    strokes = [
        element for element in svg.findall('.//*[@stroke]')
        if element.attrib['stroke'] != 'none'
    ]
    if unfilled:
        # Unfilled symbols: just dash the existing strokes.
        for element in strokes:
            element.attrib['stroke-dasharray'] = '2 2'
    else:
        # Filled symbols: paint the originals white and overlay dashed copies.
        dashed_copies = [copy.deepcopy(element) for element in strokes]
        for element in strokes:
            element.attrib['stroke'] = '#ffffff'
        for element in dashed_copies:
            element.attrib['stroke-dasharray'] = '2 2'
        svg.extend(dashed_copies)
    return svg
fe1a0e6aaf72ec53edfd93da948ae953a3e7ae3c
686,871
def iter_copy(structure):
    """
    Return a deep copy of an iterable object, copying all nested iterables
    into new lists.

    Strings and bytes are treated as atomic values: the original version
    recursed into them character by character, which never terminates
    because a one-character string is itself iterable (RecursionError on
    any structure containing a string).
    """
    copied = []
    for item in structure:
        if hasattr(item, "__iter__") and not isinstance(item, (str, bytes)):
            copied.append(iter_copy(item))
        else:
            copied.append(item)
    return copied
e9eb1000abb428ad15006fe18791861e75b17691
686,874
def tag_predicate(p):
    """
    Given a full URI predicate, return a tagged predicate.
    So, for example, given

        http://www.w3.org/1999/02/22-rdf-syntax-ns#type

    return

        rdf:type

    Returns None when the URI matches no known namespace.
    """
    ns = {
        "http://www.w3.org/1999/02/22-rdf-syntax-ns#":"rdf:",
        "http://www.w3.org/2000/01/rdf-schema#":"rdfs:",
        "http://www.w3.org/2001/XMLSchema#":"xsd:",
        "http://www.w3.org/2002/07/owl#":"owl:",
        "http://www.w3.org/2003/11/swrl#":"swrl:",
        "http://www.w3.org/2003/11/swrlb#":"swrlb:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/0.7#":"vitro:",
        "http://purl.org/ontology/bibo/":"bibo:",
        "http://purl.org/spar/c4o/":"c4o:",
        "http://purl.org/spar/cito/":"cito:",
        "http://purl.org/dc/terms/":"dcterms:",
        "http://purl.org/NET/c4dm/event.owl#":"event:",
        "http://purl.org/spar/fabio/":"fabio:",
        "http://xmlns.com/foaf/0.1/":"foaf:",
        "http://aims.fao.org/aos/geopolitical.owl#":"geo:",
        "http://purl.obolibrary.org/obo/":"obo:",
        "http://purl.org/net/OCRe/research.owl#":"ocrer:",
        "http://purl.org/net/OCRe/study_design.owl#":"ocresd:",
        "http://www.w3.org/2004/02/skos/core#":"skos:",
        "http://vivo.ufl.edu/ontology/vivo-ufl/":"ufVivo:",
        "http://www.w3.org/2006/vcard/ns#":"vcard:",
        "http://vitro.mannlib.cornell.edu/ns/vitro/public#":"vitro-public:",
        "http://vivoweb.org/ontology/core#":"vivo:",
        "http://vivoweb.org/ontology/scientific-research#":"scires:"
    }
    for uri, tag in ns.items():
        if uri in p:
            return p.replace(uri, tag)
    return None
bfe1cf2ea83f4faee77299b0e7929571b550b59a
686,876
def chunk(arr: list, size: int = 1) -> list:
    """
    Divide a list into sublists of length `size` (the last chunk may be
    shorter).

    Args:
        arr ( list ) : list to split
        size ( int, optional ) : chunk size. Defaults to 1

    Return:
        list : A new list containing the chunks of the original

    Examples:
        >>> chunk([1, 2, 3, 4])
        [[1], [2], [3], [4]]

        >>> chunk([1, 2, 3, 4], 2)
        [[1, 2], [3, 4]]

        >>> chunk([1, 2, 3, 4, 5, 6], 3)
        [[1, 2, 3], [4, 5, 6]]
    """
    return [arr[start:start + size] for start in range(0, len(arr), size)]
0aa82c11fdc63f7e31747c95e678fdcac4571547
686,877
def listify(x):
    """Wrap `x` in a list unless it already is one."""
    return x if isinstance(x, list) else [x]
e68cca5c56fb8537a8340682618aa509a5d54bd2
686,879
def find_missing_number(array):
    """
    :param array: integer array containing the values 1..len(array)+1 with
        exactly one value missing
    :return: the missing value

    Method: subtract the actual sum from the expected sum of 1..n+1.
    Complexity: O(n)
    """
    count = len(array)
    total_expected = (count + 1) * (count + 2) // 2
    return total_expected - sum(array)
e70ee9bbe7aa9924ddd0e3059c43ed920621ebe9
686,884
def prune_nones_list(data):
    """Remove trailing None values from a list.

    Interior None values are preserved; only the trailing run of Nones is
    dropped. Scans once from the end, O(n) — the original rescanned the
    tail for every element, which was O(n**2).

    :param data: list possibly ending in None values
    :return: new list without the trailing Nones
    """
    for index in range(len(data) - 1, -1, -1):
        if data[index] is not None:
            return data[:index + 1]
    return []
ae596bdbfede7d86ef8b318a63eb1b8b53cf08bf
686,887
def get_clean_path_option(a_string):
    """
    Typically install flags are shown as 'flag:PATH=value', so this function
    splits the two and removes the ':PATH' portion.

    Splits on the first '=' only, so default values that themselves contain
    '=' are kept intact (the original two-way unpack raised ValueError for
    those).

    :param a_string: string of the form 'flag:PATH=value'
    :return: tuple of (install flag, default value)
    """
    option, default = a_string.split('=', 1)
    option = option[:-5]  # drop the trailing ':PATH'
    return option, default
56954c706de0d01077a6c6e023e997d66517e5a1
686,893
from typing import List, Tuple
import re


def parse_command(input: str) -> Tuple[str, List[str]]:
    """
    Parse the command.

    :param input: The input to parse.
    :returns: Tuple of command, and a list of parameters to the command.
    """
    # Trim the ends, then collapse any internal run of whitespace to one space.
    normalized = re.sub(r"\s\s+", " ", input.strip())
    command, *parameters = normalized.split(" ")
    return command, parameters
b6e8375940538452096ab13fcede5cd7ce242348
686,897
def id_2_addr(hue_id):
    """ Convert a Phillips Hue ID to an ISY Address.

    Strips ':' and '-' separators and keeps the last 14 characters.
    """
    stripped = hue_id.translate(str.maketrans('', '', ':-'))
    return stripped[-14:]
da1a085be4fc07b827f34dd8d5d556d218dd2a6c
686,898
def merge(novel_adj_dict, full_adj_dict):
    """
    Merges adjective occurrence results from a single novel with results for
    each novel. Mutates and returns `full_adj_dict`.

    :param novel_adj_dict: dictionary of adjectives/#occurrences for one novel
    :param full_adj_dict: dictionary of adjectives/#occurrences for multiple novels
    :return: full_adj_dict with novel_adj_dict's counts merged in

    >>> novel_adj_dict = {'hello': 5, 'hi': 7, 'hola': 9, 'bonjour': 2}
    >>> full_adj_dict = {'hello': 15, 'bienvenue': 3, 'hi': 23}
    >>> merge(novel_adj_dict, full_adj_dict)
    {'hello': 20, 'bienvenue': 3, 'hi': 30, 'hola': 9, 'bonjour': 2}
    """
    for adjective, count in novel_adj_dict.items():
        full_adj_dict[adjective] = full_adj_dict.get(adjective, 0) + count
    return full_adj_dict
90d77279aba293b7358bbb7e4b774769cf03588c
686,900
def jinja2_lower(env, s):
    """Jinja2 filter: convert string `s` to lowercase (`env` is unused)."""
    return s.lower()
95c63c2a1eabc07ebe3a4440155588813d89a166
686,902
def compute_cycle_length_with_denom(denom):
    """
    Compute the decimal cycle length for the unit fraction 1/denom
    (where denom > 1).

    e.g. 1/6 = 0.1(6) so the cycle length is 1; 1/7 = 0.(142857) has cycle
    length 6. Terminating expansions (e.g. 1/8) return 0.

    Implementation: long division. The remainder after each digit fully
    determines every following digit, so the first repeated remainder marks
    the cycle, and the distance between its two appearances is the cycle
    length. A remainder of 0 means the expansion terminates.
    """
    seen_at = {}   # remainder -> digit position where it first appeared
    remainder = 1
    position = 0
    while remainder != 0:
        if remainder in seen_at:
            return position - seen_at[remainder]
        seen_at[remainder] = position
        remainder = (remainder * 10) % denom
        position += 1
    return 0
50194cd7ffc699a59d48356f310679dfcc24c273
686,903
def binary_to_ascii(binary_item):
    """ Convert a binary number to an ASCII char.

    Returns -1 when given an empty value (sentinel kept from the original).
    """
    text = str(binary_item)
    if not text:
        return -1
    return chr(int(text, 2))
b310ce04dc932832e326d8164a003300246f12b6
686,905
import shutil


def read_binary(shared_dir, output_file):
    """
    Read the binary output file produced in the container into one bytes
    string (not an iterable), then remove the temporary parent directory
    the container mounted.

    :param shared_dir: temporary directory the container mounted
    :param output_file: path to the output file
    :return: bytes contents of the file
    """
    with open(output_file, 'rb') as file_handle:
        contents = file_handle.read()
    shutil.rmtree(shared_dir)
    return contents
ed79ea0bddccc4c24ee7bb412ce4526e2a90ad7c
686,914
def number_of_lines(filename=""):
    """returns the number of lines in a file"""
    with open(filename, mode='r', encoding='utf-8') as a_file:
        return sum(1 for _ in a_file)
ddc16356dcac6372bbbeab05742eabe47a44fa0b
686,916
import inspect
import types


def _repr_obj(obj, show_modules: bool = False, depth: int = 0) -> str:
    """Return a pretty, multi-line representation of an object.

    Renders the object as ``ClassName (`` followed by one ``name=value``
    line per ``__init__`` parameter (values read back from the instance's
    attributes of the same name), indented with tabs by nesting level and
    closed with ``)``. Tabs are expanded to two spaces in the final output.

    Parameters
    ----------
    obj
        Object to render; its ``__init__`` signature determines which
        attributes are shown.
    show_modules
        If True, prefix the class name with its module path.
    depth
        Current nesting level, used to indent nested objects.
    """
    rep = f"{obj.__class__.__name__} ("
    if show_modules:
        rep = f"{obj.__class__.__module__}.{rep}"
    tab = "\t"

    # Collect init parameters, skipping the *args / **kwargs catch-alls.
    params = {
        name: getattr(obj, name)
        for name, param in inspect.signature(obj.__init__).parameters.items()  # type: ignore
        if not (
            param.name == "args"
            and param.kind == param.VAR_POSITIONAL
            or param.name == "kwargs"
            and param.kind == param.VAR_KEYWORD
        )
    }

    n_params = 0

    for name, val in params.items():

        n_params += 1

        # Prettify the attribute when applicable
        if isinstance(val, types.FunctionType):
            val = val.__name__
        if isinstance(val, str):
            val = f'"{val}"'
        elif isinstance(val, float):
            # Scientific notation for very large / very small positive
            # floats; otherwise fixed-point with trailing zeros stripped.
            val = (
                f"{val:.0e}"
                if (val > 1e5 or (val < 1e-4 and val > 0))
                else f"{val:.6f}".rstrip("0")
            )
        elif isinstance(val, set):
            val = sorted(val)
        elif hasattr(val, "__class__") and "river." in str(type(val)):
            # Nested river object: recurse, one indent level deeper.
            val = _repr_obj(obj=val, show_modules=show_modules, depth=depth + 1)

        rep += f"\n{tab * (depth + 1)}{name}={val}"

    if n_params:
        rep += f"\n{tab * depth}"
    rep += ")"

    return rep.expandtabs(2)
03d359423540821f83fb89d612a75d5f508ceb01
686,917
import json
import base64


def b64_json_enc(data):
    """
    Encode data as base64-encoded JSON.

    :data: data to encode
    :returns: encoded str
    """
    payload = json.dumps(data).encode()
    return base64.b64encode(payload).decode()
e5ecc8d05ff5f49872010daa500a210cddb91700
686,919
import re


def is_valid_project_id(project_id):
    """True if string looks like a valid Cloud Project id."""
    pattern = r'^(google.com:)?[a-z0-9\-]+$'
    return re.match(pattern, project_id)
46532d10a8a0ed304204d858445b935ff4f4bfe9
686,921
from random import shuffle


def k_fold_split(X, Y, k=10, shuffleDataset=True):
    """
    Split the paired lists X and Y into k folds.

    The indices are shuffled first (unless shuffleDataset is falsy), so two
    calls will generally not return the same folds.

    ex:
    print(k_fold_split(["A", "B", "C", "D", "E", "F", "G"], ["a", "b", "c", "d", "e", "f", "g"], k=3, shuffleDataset=0))
    [[('A', 'a'), ('B', 'b')], [('C', 'c'), ('D', 'd')], [('E', 'e'), ('F', 'f'), ('G', 'g')]]
    """
    assert len(X) == len(Y) and k <= len(X)

    def split_evenly(seq, num):
        # Walk the sequence in steps of len/num, truncating boundaries to
        # ints, so fold sizes differ by at most one and the last fold
        # absorbs the remainder.
        step = len(seq) / float(num)
        pieces = []
        cursor = 0.0
        while cursor < len(seq):
            pieces.append(seq[int(cursor):int(cursor + step)])
            cursor += step
        return pieces

    order = list(range(len(X)))
    if shuffleDataset:
        shuffle(order)
    return [[(X[i], Y[i]) for i in fold] for fold in split_evenly(order, k)]
0effb1cc05696fbd83423bf20520cc052835d42a
686,927
def indent(num_spaces):
    """Return a string of spaces.

    Args:
        num_spaces: An int number of spaces (non-positive yields '').

    Returns:
        A string containing num_spaces spaces.
    """
    return ' ' * num_spaces
99784b1371330d2c1998c396b4235216044744d3
686,929
import time


def exampleWorker(in_str, in_num):
    """
    An example worker function.

    Prints its inputs, simulates ten seconds of work, and returns a string
    combining both arguments ("<in_str> <in_num> X").

    :param in_str: arbitrary string input
    :param in_num: arbitrary number input
    :return: combined result string
    """
    print('Got:', in_str, in_num)
    # Sleep instead of the original busy-wait (`while True` polling
    # time.time()), which spun a CPU core at 100% for the full ten seconds.
    time.sleep(10)
    return in_str + " " + str(in_num) + " X"
a1e25a6cb43428fcda6dbe06c7013b8f0d3f1411
686,931
def _get_proxy_type(type_id): """ Return human readable proxy type Args: type_id: 0=frontend, 1=backend, 2=server, 3=socket/listener """ proxy_types = { 0: 'frontend', 1: 'backend', 2: 'server', 3: 'socket/listener', } return proxy_types.get(int(type_id))
ecf513205f0118264e723169292a0385564bc480
686,940