content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def match_by_split(split: str) -> dict:
    """Build the MongoDB ``$match`` filter for one split of ['train', 'valid', 'test'].

    Args:
        split: Name of the dataset split.

    Returns:
        dict: ``$and`` query requiring ``is_AF`` to exist and ``split`` to match.
    """
    conditions = [{"is_AF": {"$exists": True}}, {"split": split}]
    return {"$and": conditions}
6182892d62766754c63d6f13f6b8520ba5e4aa67
692,521
def sum_no_duplicates(numbers):
    """Sum the values in *numbers* that appear exactly once.

    Any value occurring two or more times is excluded entirely
    (every occurrence is dropped, not just the extras).

    Args:
        numbers: Iterable of hashable numbers.

    Returns:
        Sum of the non-repeated values (0 for empty input).
    """
    counts = {}
    for number in numbers:
        counts[number] = counts.get(number, 0) + 1
    # Fix: the original printed each duplicate occurrence (debug
    # leftover) and called list.count per element (O(n^2)); count once.
    return sum(number for number, count in counts.items() if count == 1)
812edb2b867dde6f414dd963a4e7ef85f76146ed
692,530
def splitDataSet(dataSet, axis, value):
    """Partition a dataset on one feature value.

    :param dataSet: list of feature vectors (lists) to partition
    :param axis: index of the feature dimension to split on
    :param value: feature value to select
    :return: rows whose feature at ``axis`` equals ``value``, each with
        that feature column removed
    """
    selected = []
    for row in dataSet:
        if row[axis] != value:
            continue
        # drop the split feature from the kept row
        trimmed = row[:axis]
        trimmed.extend(row[axis + 1:])
        selected.append(trimmed)
    return selected
3cfddeeec479e369b35fd4910e2a7cd6e0e7d2f7
692,533
def recursive_fibonacci(n: int) -> int:
    """
    Returns n-th Fibonacci number

    n must be more than 0, otherwise it raise a ValueError.

    >>> recursive_fibonacci(0)
    0
    >>> recursive_fibonacci(1)
    1
    >>> recursive_fibonacci(2)
    1
    >>> recursive_fibonacci(10)
    55
    >>> recursive_fibonacci(-2)
    Traceback (most recent call last):
        ...
    ValueError: n must be more or equal than 0
    """
    if n < 0:
        raise ValueError('n must be more or equal than 0')
    if n in (0, 1):
        return n
    return recursive_fibonacci(n - 2) + recursive_fibonacci(n - 1)
2a0d6c4980e0e306317a448b96162ecb7ce49fcd
692,534
def bigrams(text):
    """Return a list of pairs in text (a sequence of letters or words).

    >>> bigrams('this')
    ['th', 'hi', 'is']
    >>> bigrams(['this', 'is', 'a', 'test'])
    [['this', 'is'], ['is', 'a'], ['a', 'test']]
    """
    pairs = []
    for start in range(len(text) - 1):
        pairs.append(text[start:start + 2])
    return pairs
86cdfe9b6e6c161c7cb11b89bc86b1ec0b80de1b
692,535
import copy
def make_hash(o):
    """Recursively hash a dict, list, tuple or set (to any nesting level)
    whose leaves are hashable.

    Sequences and sets become tuples of child hashes; dicts are hashed
    via their sorted (key, child-hash) pairs; anything else falls back
    to the built-in ``hash``.
    """
    if isinstance(o, (set, tuple, list)):
        return tuple([make_hash(element) for element in o])
    if not isinstance(o, dict):
        return hash(o)
    # deep-copy so the caller's dict is never mutated
    hashed = copy.deepcopy(o)
    for key, value in hashed.items():
        hashed[key] = make_hash(value)
    return hash(tuple(frozenset(sorted(hashed.items()))))
8ab3eb3bbfb952d238a1455a46eabd995bd655c9
692,539
def single_pulse_SCPI(
    pulsewidth, updown, high_voltage, low_voltage, channel="1", *args, **kwargs
):
    """
    Returns SCPI string that can be written to the pulse generator to
    put it in the correct state to apply a single pulse.

    args:
        pulsewidth (str): Pulsewidth, e.g. '10ns'; allowed units {ns, us, ms, s}
        updown (str): Specify polarity. 'up' or 'down'.
        high_voltage (str): High voltage of pulse, e.g. '1000mv'; units {V, mv}
        low_voltage (str): Low voltage of pulse, e.g. '-1000mv'; units {V, mv}
        channel (str): Specify the output channel. '1' or '2'

    raises:
        ValueError: if any argument fails validation.
    """
    if pulsewidth[-2:] not in {"ns", "us", "ms"}:
        if pulsewidth[-1] != "s":
            raise ValueError("pulsewidth " + str(pulsewidth) + " not supported")
    if updown not in {"up", "down"}:
        raise ValueError("updown " + str(updown) + " not supported")
    if high_voltage[-2:].lower() != "mv" and high_voltage[-1].lower() != "v":
        raise ValueError("high_voltage " + str(high_voltage) + " not supported")
    if low_voltage[-2:].lower() != "mv" and low_voltage[-1].lower() != "v":
        raise ValueError("low_voltage " + str(low_voltage) + " not supported")
    if channel not in {"1", "2"}:
        raise ValueError("channel " + str(channel) + " not supported")

    # Fix: the two polarity branches were near-duplicates; only the
    # inversion flag and the order of the level commands differ.
    src = ":sour" + channel
    if updown == "up":
        inversion = "inv off"
        level_cmds = (":volt:lev:imm:high " + high_voltage,
                      ":volt:lev:imm:low " + low_voltage)
    else:
        inversion = "inv on"
        level_cmds = (":volt:lev:imm:low " + low_voltage,
                      ":volt:lev:imm:high " + high_voltage)

    out = "outp" + channel + ":puls:mode sin;"
    out += src + ":" + inversion + ";"
    out += src + level_cmds[0] + ";"
    out += src + level_cmds[1] + ";"
    # puls1 means the first pulse because we are in single mode
    out += src + ":puls1:wid " + pulsewidth + ";"
    return out
f24ea735339d140b4b943e5f4a29ae8f89785413
692,542
from typing import Optional import json def _try_parse_json(json_string: str, ref_val=None) -> Optional[dict]: """ Return whether the string can be interpreted as json. :param json_string: str, string to check for json :param ref_val: any, not used, interface design requirement :return None if not parseable, otherwise the parsed json object """ parsed = None try: parsed = json.loads(json_string) except (ValueError, TypeError): pass return parsed
a609eeefb32d88970ecf039578e8eb8a65ad8108
692,545
def collapse_words(doc):
    """Flatten a doc (parts -> sentences -> words) into one list of words."""
    words = []
    for part in doc:
        for sentence in part:
            words.extend(sentence)
    return words
9995653b0f457708c5aff3e9f3ac776eb21c02e0
692,546
def make_anchor(value: str) -> str:
    """
    Makes a GitHub-compatible anchor for `value`.

    Arguments:
        value: Heading to anchor.

    Returns:
        Anchor: alphanumerics lowercased, spaces and hyphens become '-',
        everything else dropped.
    """
    pieces = []
    for char in value:
        if char.isalnum():
            pieces.append(char.lower())
        elif char in (" ", "-"):
            pieces.append("-")
        # every other character is dropped
    return "".join(pieces)
29fa8a700922136c9a253da9f7f90febaeedf1bf
692,556
def adjust_contrast(image, contrast_level):
    """Return the image scaled to a certain contrast level in [0, 1].

    parameters:
    - image: a numpy.ndarray
    - contrast_level: a scalar in [0, 1]; with 1 -> full contrast
    """
    assert contrast_level >= 0.0, "contrast_level too low."
    assert contrast_level <= 1.0, "contrast_level too high."
    # Blend towards mid-grey: shift by half the unused contrast share,
    # then scale the pixel values by the contrast level.
    offset = (1 - contrast_level) / 2.0
    return offset + image.dot(contrast_level)
967d828b2e8874afb98f3630a5781317d9dc5b1c
692,557
def _make_mergable_with_params(dist, category): """Change the index and Series name to easily merge it to params. Args: dist (pandas.Series): distribution of number of contacts. The index is the support, the values the probabilities. category (str): name of the contact model to which the distribution belongs. This is set as the category index level of the returned Series. Returns: pandas.Series: Series with triple index category, subcategory, name. the name index level is the support. the value column contains the probabilities. """ dist.name = "value" dist = dist.to_frame() dist["category"] = category dist["subcategory"] = "n_contacts" dist["name"] = dist.index dist = dist.set_index(["category", "subcategory", "name"], drop=True) return dist
d7dc9c902a4695c76a6638e037af6a3495eeeaeb
692,560
def strip_levels(df, rows=None, columns=None):
    """Drop a level from a DataFrame's row and/or column MultiIndex.

    Parameters
    ----------
    df: pandas.DataFrame
    rows: int
        Row index level to remove, default None
    columns: int
        Column index level to remove, default None

    Returns
    -------
    pandas.DataFrame
        Copy of the input with the requested levels dropped; a level is
        only dropped when the corresponding axis is a MultiIndex.
    """
    stripped = df.copy()
    if rows is not None and stripped.index.nlevels > 1:
        stripped.index = stripped.index.droplevel(rows)
    if columns is not None and stripped.columns.nlevels > 1:
        stripped.columns = stripped.columns.droplevel(columns)
    return stripped
27394c0a92002ee53a0fe7eff3e2170122ce48d4
692,564
def may_develop_severe_illness(age, sex, rng):
    """
    Likelihood of getting really sick (i.e., requiring hospitalization)
    from Covid-19

    Args:
        age (int): person's age in years (capped at 90 internally)
        sex (str): sex label; matched on its first letter ('f' / 'm'),
            anything else uses the "other" table
        rng: random state exposing a ``rand()`` method returning [0, 1)

    Returns:
        Boolean: returns True if this person would likely require
        hospitalization given that they contracted Covid-19
    """
    # Probability per decade bucket: <10 <20 <30 <40 <50 <60 <70 <80 <90 90+
    # NOTE(review): the first two female entries (0.02, 0.002) look
    # swapped relative to the male table -- confirm with the data source.
    female = [0.02, 0.002, 0.05, 0.05, 0.13, 0.18, 0.16, 0.24, 0.17, 0.03]
    male = [0.002, 0.02, 0.03, 0.07, 0.13, 0.17, 0.22, 0.22, 0.15, 0.03]
    other = [0.02, 0.02, 0.04, 0.07, 0.13, 0.18, 0.24, 0.24, 0.18, 0.03]

    sex_lower = sex.lower()
    if sex_lower.startswith('f'):
        hospitalization_likelihood = female
    elif sex_lower.startswith('m'):
        hospitalization_likelihood = male
    else:
        hospitalization_likelihood = other

    capped_age = min(age, 90)
    # round down to nearest 10, then floor divide by 10
    bucket = (capped_age - (capped_age % 10)) // 10
    return rng.rand() < hospitalization_likelihood[bucket]
23322ca333c32a090380cdbed54827812c9e3ac3
692,568
def get_artists(tracks):
    """Map each artist to the tracks attributed to them.

    Args:
        tracks: pandas.DataFrame with 'artist_id' and 'track_id' columns.

    Returns:
        dict: artist_id -> list of track_ids, in row order.
    """
    artists = {}
    for _, row in tracks.iterrows():
        # setdefault replaces the explicit membership test/branch
        artists.setdefault(row['artist_id'], []).append(row['track_id'])
    return artists
9b7136e5c6e3838d11d8defe8d038917224423ce
692,569
def is_prime(number: int) -> bool:
    """
    Checks if `number` is a prime number.

    Parameters
    ----------
    number : The number to check for primality.

    Returns
    -------
    is_number_prime : `bool`
        Boolean indicating if `number` is prime or not.
    """
    if number <= 1:
        return False
    # number ** 0.5 is faster than math.sqrt(number)
    limit = int(number ** 0.5) + 1
    return all(number % divisor != 0 for divisor in range(2, limit))
1dc99526a4361fafdaa048a1b21ff7ce53cf36c6
692,570
import torch
def MNLLLoss(logps, true_counts):
    """A loss function based on the multinomial negative log-likelihood.

    Takes normalized log probabilities (each row sums to 1 in
    probability space, e.g. from a log softmax) and an equal-sized
    tensor of true counts, and returns the negative multinomial
    log-likelihood of those counts, reduced over the last (category)
    axis. Adapted from Alex Tseng.

    Parameters
    ----------
    logps: torch.tensor, shape=(n, ..., L)
        Predicted log probabilities over `L` categories.

    true_counts: torch.tensor, shape=(n, ..., L)
        Observed counts over `L` categories.

    Returns
    -------
    loss: torch.tensor, shape=(n, ...)
        Negative multinomial log-likelihood per example.
    """
    total_counts = true_counts.sum(dim=-1)
    log_fact_sum = torch.lgamma(total_counts + 1)
    log_prod_fact = torch.lgamma(true_counts + 1).sum(dim=-1)
    log_prod_exp = (true_counts * logps).sum(dim=-1)
    return log_prod_fact - log_fact_sum - log_prod_exp
b06ca242884a83c0076ffa5bb4d7188b9e184d0a
692,571
from typing import Optional
def pkcs7pad(bs: bytes, blocksize: Optional[int] = None) -> bytes:
    """
    S2C09 - Implement PKCS#7 padding
    https://cryptopals.com/sets/2/challenges/9

    Pad ``bs`` to a multiple of ``blocksize`` (default 16) by appending
    ``numpad`` bytes, each of value ``numpad``. An already-aligned input
    gains one full block of padding, as PKCS#7 requires.
    """
    size = 16 if blocksize is None else blocksize
    numpad = size - len(bs) % size
    return bs + bytes([numpad]) * numpad
d9066dd583280da6af162dab859bebfbfc5e7f81
692,572
def split_host(host):
    """
    Splits host into host and port.

    :param str host: Host, optionally including a ``:port`` suffix.
    :returns: A ``(str(host), int(port))`` tuple; port is ``None`` when
        the input has no port.
    """
    host, port = (host.split(':') + [None])[:2]
    # Fix: int(None) raised TypeError whenever no port was present.
    return host, int(port) if port is not None else None
d1faec745fccd85b4e6c34e0bbb5ca84b67a6ec3
692,576
def generate_single_type_function_pointer_typedefs(functions):
    """
    Generate typedef for function pointers:
    typedef return_type (*tMyFunction)(arguments).
    """
    return [
        "typedef {} (*t{}) ({});".format(
            fn.return_type,
            fn.name,
            ", ".join(str(arg) for arg in fn.arguments),
        )
        for fn in functions
    ]
a5ce49670cbce2107585381b8c7f9ba30b7adbd5
692,579
def GetPw(abs_hum, elevation):
    """Compute water-vapour partial pressure [kPa].

    Parameters
    ----------
    abs_hum : float
        Absolute humidity [kg/kg'].
    elevation : float
        Elevation above sea level [m].

    Returns
    ----------
    pw: float
        Water-vapour partial pressure [kPa].
    """
    # Barometric pressure at this elevation, computed in hPa and then
    # converted to kPa. (At 0 m the standard value is 101.325 kPa.)
    po_hpa = 1013.2 - 0.12 * elevation + 5.44 * 10 ** (-6) * elevation ** 2
    po = po_hpa / 10.0
    return (abs_hum * po) / (abs_hum + 0.62198)
445f85c5491c248bd4a0ce6d19628a77eab6b555
692,580
def decode(obj, encoding=None, errors=None):  # real signature unknown; restored from __doc__
    """
    decode(obj, [encoding[,errors]]) -> object

    Decodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore' and 'replace'
    as well as any other name registered with codecs.register_error that is
    able to handle ValueErrors.
    """
    # Signature stub only: returns a placeholder, performs no decoding.
    placeholder = object()
    return placeholder
b996cc134c3c44374ebbb11d20385720e43a477c
692,586
import pathlib
def create_output_file_path(filepath_output):
    """
    Create the output file's directory and return its pathlib.Path.
    """
    # Convert filepath from string to pathlib.Path instance.
    path = pathlib.Path(filepath_output)
    # Fix: parents=True so nested missing directories are created too
    # (the original mkdir failed when the grandparent did not exist).
    if not path.parent.exists():
        path.parent.mkdir(parents=True, exist_ok=True)
    # Raise error if the parent is not a directory (e.g. regular file).
    if not path.parent.is_dir():
        raise RuntimeError("failed to create output directory")
    return path
7285d3deab8550b79921c81f8a4ef268522eb114
692,587
def opt_err_func(params, x, y, func):
    """
    Error function for fitting a function using non-linear optimization.

    Parameters
    ----------
    params : tuple
        Parameters of `func`, in its expected argument order.
    x : float array
        An independent variable.
    y : float array
        The dependent variable.
    func : function
        A function with inputs: `(x, *params)`

    Returns
    -------
    float array
        Residuals ``y - func(x, *params)``.
    """
    predicted = func(x, *params)
    return y - predicted
58d6d4b4cadc95e683f8a431a7df912148760c92
692,590
def _minutes_to_seconds(time): """Convert time: minutes to seconds""" return time * 60.0
319d0a39f94bded468a4f27c55b8c80201724d30
692,592
def endx(eta, merger_type):
    """
    Gives ending value/upper boundary for integration of post-Newtonian
    parameter, based on Buskirk et al. (2019) equation 23.

    Parameters
    ----------
    eta: float
        Symmetric mass ratio of the binary, can be obtained from
        get_M_and_eta().
    merger_type: string
        'BH' for a BH-BH merger, 'NS' for a BH-NS or NS-NS merger

    Returns
    -------
    value: float
        The ending value for the post-Newtonian integration.
    """
    # Fix: isinstance instead of an exact-type comparison, so float
    # subclasses (e.g. numpy.float64) are accepted as well.
    assert isinstance(eta, float), 'eta should be a float.'
    if merger_type == 'BH':
        # Buskirk eq. 23, with 1/6 -> 1/3 to ensure overlap with the
        # merger portion for the matching script.
        value = (1/3) * (1 + (7/18) * eta)
    elif merger_type == 'NS':
        # Buskirk eq. 23. 1/6 because 3RSch is the ISCO for BH-NS and
        # approx. the touching point for NS-NS, so the end condition
        # for both.
        value = (1/6) * (1 + (7/18) * eta)
    else:
        raise ValueError('merger_type must be either \'BH\' for BH-BH or '
                         '\'NS\' for BH-NS or NS-NS.')
    return value
972f292ed8e2d8cdd5d7e400866b588e7a531db9
692,597
def one_hot(label, all_labels):
    """One hot encodes a label given all labels"""
    return [1 if candidate == label else 0 for candidate in all_labels]
8060c26783d80933cdb628fdc319dc86635adc80
692,598
import pickle
def load_cachedfilestring(cache_folder, filename):
    """ Loads the file string that has been previously cached in the cache
    folder.

    Args:
        cache_folder: A string representing the path of the cache folder.
        filename: A string representing the name of the file that is being
            loaded.

    Returns:
        The file string that loaded from the cache folder (returns an empty
        string if there is no string to load).
    """
    # NOTE: pickle.load can execute arbitrary code -- only use this on
    # trusted, locally-written cache files.
    try:
        # Fix: close the file handle (the original leaked it) and avoid
        # a bare except, which also swallowed KeyboardInterrupt/SystemExit.
        with open(cache_folder + filename, 'rb') as cache_file:
            return pickle.load(cache_file)
    except Exception:
        return ""
83a840d4ebea6f895ac6df5d9d7d6970c3742a5a
692,601
def split_ids(ids, n=2):
    """Expand each id into ``n`` tuples ``(id, position)``.

    Produces [(id1, 0), (id1, 1), (id2, 0), (id2, 1), ...] for n=2.
    The position value is later used as the ``pos`` argument of
    ``get_square`` in utils.py (0 selects the left part, 1 the right).

    Returns a lazy generator, not a list.
    """
    return ((current_id, position) for current_id in ids for position in range(n))
594cdad1d64ce9bac5a4ed384eb9a6b613dcfad2
692,604
def insert_statement(table_name, columns, data=None):
    """
    Generates an INSERT statement for given `table_name`.

    :param str table_name: table name
    :param tuple columns: tuple of column names
    :param data: dict of column name => value mapping; only columns
        present in this mapping appear in the statement
    :type data: dict or None
    :return: SQL statement template suitable for sqlalchemy.execute()
    :rtype: str
    """
    if data is None:
        data = {}
    present = [column for column in columns if column in data]
    placeholders = [":{column}".format(column=column) for column in present]
    return "INSERT INTO {table_name} ({columns_list}) VALUES ({values_list})".format(
        table_name=table_name,
        columns_list=', '.join(present),
        values_list=', '.join(placeholders),
    )
ef643bde991b1fd6d9a5772e90a4b6dcf021b4b2
692,605
import numbers
def torch_data_sum(x):
    """
    Like ``x.data.sum()`` for a ``torch.autograd.Variable``, but also works
    with numbers.
    """
    if not isinstance(x, numbers.Number):
        return x.data.sum()
    return x
9243fcbbb7ff3a04f998a07d1ebc05a6ced97259
692,606
def batchify(array: list, bs: int = 1, generator: bool = True):
    """Convert any iterable into a list/generator with batch size `bs`"""
    def chunks(seq, size):
        # yield consecutive slices of length `size` (last may be short)
        for start in range(0, len(seq), size):
            yield seq[start:start + size]

    batches = chunks(array, bs)
    return batches if generator else list(batches)
0393640a55ff98bb9ce16a4fe3daac1951206aa7
692,608
from typing import Tuple
def get_grid(n: int) -> Tuple[int, int]:
    """Gets the number of rows and columns needed according the number of
    subplots."""
    rows = (n + 2) // 3
    if n == 4:
        cols = 2  # special case: 2x2 looks better than 2x3
    elif n <= 3:
        cols = n
    else:
        cols = 3
    return rows, cols
0301ddc062157212919382d5c7067d96657ed257
692,612
def row_col_indices_from_flattened_indices(indices, num_cols):
    """Computes row and column indices from flattened indices.

    Args:
        indices: An integer tensor of any shape holding the indices in the
            flattened space.
        num_cols: Number of columns in the image (width).

    Returns:
        row_indices: The row indices corresponding to each of the input
            indices. Same shape as indices.
        col_indices: The column indices corresponding to each of the input
            indices. Same shape as indices.
    """
    # idx - row * num_cols is equivalent to idx % num_cols; the mod
    # operator is avoided for portability (e.g. WASM targets).
    rows = indices // num_cols
    cols = indices - rows * num_cols
    return rows, cols
8b56ab9a63a4edb929d5ba0a6bc0d52da61939f8
692,614
def get_item(obj, key):
    """
    Obtain an item in a dictionary style object.

    :param obj: The object to look up the key on.
    :param key: The key to lookup.
    :return: The contents of the dictionary lookup, or None when the key
        is missing (only KeyError is absorbed; other errors propagate).
    """
    try:
        value = obj[key]
    except KeyError:
        return None
    return value
b27170c6df98aac61ff133542597a4f369f0660c
692,615
def convert_phrase_to_url(phrase):
    """ Converts a phrase such as word1 word2 to a wikipedia URL of the form
    http://en.wikipedia.org/wiki/word1_word2 and returns it"""
    slug = '_'.join(phrase.split())
    return 'http://en.wikipedia.org/wiki/' + slug
57633b17c06a4aad6e8ff3911a3e520390ad4952
692,617
def psudo_graph(transactions):
    """Build a pseudo-graph from the given transactions.

    For every pair of consecutive items (u, v) in a transaction, record
    v as an outgoing edge of u ("prefix") and u as an incoming edge of v
    ("postfix"), keyed by the transaction's index.

    Note: within one transaction a later occurrence of the same source
    item overwrites the earlier entry for that transaction id.
    """
    graph = {}
    for transaction_id, transaction in enumerate(transactions):
        for source, target in zip(transaction, transaction[1:]):
            node = graph.setdefault(source, {"prefix": {}, "postfix": {}})
            node["prefix"][transaction_id] = target
            node = graph.setdefault(target, {"prefix": {}, "postfix": {}})
            node["postfix"][transaction_id] = source
    return graph
9cc11be18fea3f0584e80c4fb9579ec6fa9faa1d
692,618
def mac_addr_is_unicast(mac_addr):
    """Returns True if mac_addr is a unicast Ethernet address.

    Args:
        mac_addr (str): MAC address.
    Returns:
        bool: True if a unicast Ethernet address.
    """
    first_octet = mac_addr.split(':')[0]
    # Unicast iff the least-significant bit of the first octet is 0,
    # i.e. its low hex digit is even.
    return first_octet[-1] in '02468aAcCeE'
443d349315e2d1ef9997ab023e2a5a243fd52150
692,620
def make_bare_labels(subsystem_count, *args):
    """
    For the given subsystem states, return the full-system bare state
    label obtained by placing all remaining subsys_list in their ground
    states.

    Parameters
    ----------
    subsystem_count: int
        number of subsys_list inside Hilbert space
    *args: tuple(int, int)
        each argument is a tuple of the form (subsys_index, label)

    Returns
    -------
    tuple
        With 5 subsystems and args (0, 3), (2, 1), the returned
        bare-state tuple is (3, 0, 1, 0, 0).
    """
    overrides = {subsys_index: label for subsys_index, label in args}
    return tuple(overrides.get(index, 0) for index in range(subsystem_count))
30fb36fc230f2fa4e9dde55e535996ba7549ed7b
692,621
from typing import Counter
def get_most_freq_c(data, n):
    """ Finds the n most frequent names in data

    args: data (list of tuples (name, sex)), n
    returns: list of (name, count) tuples, most frequent first
        (fixed docstring: the original claimed a list of names, but
        Counter.most_common returns (item, count) pairs)
    """
    # Count the names directly; the sex field is ignored.
    name_counts = Counter(name for name, _ in data)
    return name_counts.most_common(n)
9cb32f9780afbd73e5f9cbfba8e4809d5b81768f
692,622
def fn_oostatus2bool(str_status):
    """Convert OmniOutliner checked/unchecked to boolean"""
    is_checked = str_status == 'checked'
    return is_checked
beb11b8b8aca12f22bb9418bf9005bd5f9ad1b48
692,624
import torch
def get_onehot(data_list, categories) -> torch.Tensor:
    """Transform lists of labels into one-hot rows.

    Args:
        data_list (list of list of int): source data.
        categories (int): #label class.

    Returns:
        torch.Tensor: (len(data_list), categories) tensor where each row
        places 1/len(labels) at every listed label index.
    """
    rows = []
    for labels in data_list:
        row = torch.zeros(categories)
        weight = 1.0 / len(labels)
        for label in labels:
            row[label] = weight
        rows.append(row)
    return torch.stack(rows, dim=0)
ea7ed1c1e292dd3872000e4d16ecb1a23401b92a
692,626
import torch
def sparsity_2D(tensor):
    """Fraction of all-zero 2D structures (rows) in a weight tensor.

    4D (convolution) weights of shape #OFMs x #IFMs x K x K are
    flattened to (#OFMs * #IFMs, K^2) so each kernel is one row.
    2D (fully-connected) weights are used as-is; only row structures
    are supported. Any other dimensionality returns 0.

    Sparsity is measured by summing abs() over each row and counting
    rows whose sum is exactly zero.
    """
    if tensor.dim() == 4:
        # each filter kernel becomes one row
        rows = tensor.view(-1, tensor.size(2) * tensor.size(3))
    elif tensor.dim() == 2:
        rows = tensor
    else:
        return 0
    total_structs = rows.size()[0]
    nonzero_structs = len(torch.nonzero(rows.abs().sum(dim=1)))
    return 1 - nonzero_structs / total_structs
57dc9ee68d42e84db99cc25a48ae6843a88b8c3e
692,633
from typing import Type
from typing import Tuple
from typing import Optional
from typing import List
from typing import TypeVar
def parse_hint(hint: Type) -> Tuple[Type, Optional[List]]:
    """Parse a typing hint into its type and arguments.

    For example:

        >>> parse_hint(Union[dict, list])
        (typing.Union, [<class 'dict'>, <class 'list'>])

        >>> parse_hint(int)
        (<class 'int'>, None)
    """
    if not hasattr(hint, "__origin__"):
        # Plain value (e.g. int, datetime) -- no typing machinery.
        return hint, None
    # Typing construct (e.g. typing.Union): keep only explicit args,
    # dropping bare TypeVars such as KT & VT_co (they generally indicate
    # that no explicit hint was given).
    hint_args = [
        arg for arg in getattr(hint, "__args__", [])
        if not isinstance(arg, TypeVar)
    ]
    return hint.__origin__, hint_args or None
4900f319240f5feeda8a9b0be768d794f8b24a70
692,635
import string
def digitsOnly(s):
    """Return True if s contains only ASCII digits (True for empty s)."""
    # all() over a generator avoids building the filtered copy of s the
    # original joined and compared. Note str.isdigit() would NOT be
    # equivalent: it rejects '' and accepts non-ASCII digit characters.
    return all(c in string.digits for c in s)
0592f825ff014c25266d7b55774da7a55916a5e1
692,637
def _initialize_headers(headers): """Creates a copy of the headers. Args: headers: dict, request headers to copy. Returns: dict, the copied headers or a new dictionary if the headers were None. """ return {} if headers is None else dict(headers)
61e348c065e1895321ca622572978e60a9dfed47
692,639
def linear_regression(x, y):
    """
    Calculates a linear regression model for the set of data points.

    Args:
        x: a 1-d numpy array of length N, representing the x-coordinates
            of the N sample points
        y: a 1-d numpy array of length N, representing the y-coordinates
            of the N sample points

    Returns:
        (m, b): A tuple containing the slope and y-intercept of the
            regression line, both of which are floats.
    """
    # Least-squares slope from the centered coordinates.
    x_dev = x - x.mean()
    y_dev = y - y.mean()
    m = (x_dev * y_dev).sum() / (x_dev ** 2).sum()
    b = y.mean() - m * x.mean()
    return (m, b)
33037a2d57172ff2eb386ed35787cda08eb8f11d
692,640
def ask_for_int(sentence: str) -> int:
    """ Ask the user for an integer, re-prompting until one is given. """
    while True:
        answer = input(sentence)
        try:
            return int(answer)
        except ValueError:
            print("Invalid input. Please try again.")
a494b2d5c1a40948b04fe83c6d73c4d867b7db1f
692,641
def crop_image(img, crop_idx):
    """ Returns a cropped version of img with the provided crop indices
    ((upper, lower), (left, right)) """
    (top, bottom), (left, right) = crop_idx
    return img[top:bottom, left:right]
23062d1070053664fe8c1d88e6a04883bea33eb3
692,642
def guess_subsystem(host: str) -> str:
    """Guess the subsystem based on the host name."""
    normalized = host.replace("_", "-").lower()
    # Keyword table replaces the chain of near-identical if statements.
    keyword_map = (
        ("-vac", "Vacuum"),
        ("-optics", "Optics"),
        ("-motion", "Motion"),
        ("-vonhamos", "Motion"),
        ("-sds", "SDS"),
    )
    for keyword, subsystem in keyword_map:
        if keyword in normalized:
            return subsystem
    try:
        # Fall back to the second dash-separated token, uppercased.
        return normalized.split("-")[1].upper()
    except IndexError:
        # Fix: narrowed from a broad `except Exception` -- only the
        # missing-token case can occur here.
        return "PythonLogDaemon"
7c0ecda32517d110c4fd5bceb177b0da77898268
692,643
def uccsd_convert_amplitude_format(single_amplitudes, double_amplitudes):
    r"""Re-format single_amplitudes and double_amplitudes from ndarrays
    to lists.

    (Raw docstring: the original's \dagger sequences were invalid escape
    sequences in a non-raw string literal.)

    Args:
        single_amplitudes(ndarray): [NxN] array storing single excitation
            amplitudes corresponding to t[i,j] * (a_i^\dagger a_j - H.C.)
        double_amplitudes(ndarray): [NxNxNxN] array storing double
            excitation amplitudes corresponding to
            t[i,j,k,l] * (a_i^\dagger a_j a_k^\dagger a_l - H.C.)

    Returns:
        single_amplitudes_list(list): sublists of indices followed by the
            amplitude, i.e. [[[i,j],t_ij], ...]
        double_amplitudes_list(list): sublists of indices followed by the
            amplitude, i.e. [[[i,j,k,l],t_ijkl], ...]
    """
    single_amplitudes_list = [
        [[i, j], single_amplitudes[i, j]]
        for i, j in zip(*single_amplitudes.nonzero())
    ]
    double_amplitudes_list = [
        [[i, j, k, l], double_amplitudes[i, j, k, l]]
        for i, j, k, l in zip(*double_amplitudes.nonzero())
    ]
    return single_amplitudes_list, double_amplitudes_list
54ecedcda46950b81802f0ea23df54a7c7cddd7a
692,645
def transposition_string_012_format(move_sequence_string): """Return a transposition string in the following format. 0 is a separator between columns 1 represent disks from the beginning player and 2 disks from the other player. Between the separators each column is described from below. For example the transposition 0000000 0000000 0000000 0000000 0002000 0001000 is represented as "00012000". 0000000 0000000 0002000 0001000 0102000 0201021 is represented as "02100121200201". """ board = [[], [], [], [], [], [], []] player = "1"; for move in move_sequence_string: board[int(move)].append(player) if player == "1": player = "2" else: player = "1" return "0".join(["".join(column) for column in board])
592ca9ad0c9742caac8b493fcead586bee072457
692,646
from typing import Optional
def get_size_param(parameter: dict) -> Optional[str]:
    """Get the size value of the given parameter, or None if absent."""
    return parameter.get("size", {}).get("value", None)
ef70ffe2e0333c2765fcf9683547fd7c4ead783f
692,648
def tricky_tt(request):
    """Return tricky TT lines to parse (reads pytest-style ``request.param``)."""
    param = request.param
    return param
46a0f99caab6aea29e1d353c6e57357c3b8e74b1
692,650
def guess_identifier_format(identifier_str):
    """Guess identifier format.

    :param str identifier_str: Chemical identifier string.
    :return: 'inchi' or 'smiles' string.
    :rtype: :py:class:`str`
    """
    is_inchi = identifier_str.startswith('InChI=')
    return 'inchi' if is_inchi else 'smiles'
37128f15f16da64e533b1e7a878c288711016dd9
692,651
def extract_extension_attributes(schema: dict) -> dict:
    """Extract custom 'x-*' attributes from schema dictionary

    Args:
        schema (dict): Schema dictionary

    Returns:
        dict: Dictionary with parsed attributes w/o 'x-' prefix and with
        remaining hyphens converted to underscores
    """
    prefix = 'x-'
    extensions: dict = {}
    for key, value in schema.items():
        if not key.startswith(prefix):
            continue
        normalized = key.replace(prefix, '').replace('-', '_')
        extensions[normalized] = value
    return extensions
80829a1e222b7e55d41483592e20b06bb63ea8a2
692,654
def max_contig_sum(L):
    """ L, a list of integers, at least one positive
    Returns the maximum sum of a contiguous subsequence in L """
    # Kadane's algorithm: extend the running sum, reset it whenever it
    # goes negative, and track the best sum seen so far.
    best = 0
    running = 0
    for element in L:
        running += element
        if running < 0:
            running = 0
        best = max(best, running)
    return best
33e73e4a98943adadfda75af103588e7caa2115f
692,656
def is_palindrome(s):
    """
    Input: s, a string
    Returns True if s is a palindrome, False otherwise.
    Case-insensitive; only the letters a-z are considered.
    """
    def to_chars(s):
        # keep lowercase ASCII letters only
        s = s.lower()
        return ''.join(char for char in s
                       if char in 'abcdefghijklmnopqrstuvwxyz')

    cleaned = to_chars(s)
    # Fix: direct reversed comparison is O(n); the original recursion
    # sliced a fresh string per level (O(n^2)) and could exceed the
    # recursion limit on long inputs.
    return cleaned == cleaned[::-1]
a956ee66f20d57eb58dae99c7108739b84bf313d
692,663
def sec0to1(val):
    """ Converts the system security values into values between 0 and 1
    (clamped, and rounded to one decimal place when inside the range) """
    if val < 0:
        return 0.0
    if val > 1:
        return 1.0
    return round(val, 1)
69f63794a851a32ebc8a9f5c209a78354bce6586
692,666
import re
def filter_timestamps(in_str_list):
    """ Filter out timestamps and core IDs in OpTiMSoC STDOUT/STM/CTM log
    files

    The timestamps depend at least on the compiler, but also on other
    variables. For functional tests we are only interested in the output,
    not the timing of the output.
    """
    timestamp_re = re.compile(r'^\[\s*\d+, \d+\] ', flags=re.MULTILINE)
    return [timestamp_re.sub('', line) for line in in_str_list]
c43661b49b75c18df1a436d7a548c74284629fa3
692,667
def build_nyiso_url(month, data_type, zone):
    """Builds a string that is the URL address for a NYISO data file.

    Args:
        month: pandas timestamp for the first day of the month of data
            requested
        data_type: string denoting the type of NYISO data to retrieve,
            e.g. "damlbmp" (day ahead market location based marginal
            price) or "outSched" (outage schedule)
        zone: string denoting the NYISO geographic zone of the data.
            Required when data_type == "damlbmp".

    Returns:
        url: string giving the URL address of a NYISO data file, e.g.
        'http://mis.nyiso.com/public/csv/damlbmp/20180201damlbmp_zone_csv.zip'

    Raises:
        RuntimeError: if zone is None while data_type == 'damlbmp'.
    """
    # Fix: identity checks (`is None` / `is not None`) instead of
    # equality comparisons with None.
    if data_type == 'damlbmp' and zone is None:
        raise RuntimeError("Zone must be specified when data_type == 'damlbmp'")

    def _to_yyyymmdd(timestamp):
        """Returns the yyyymmdd format date given a pandas timestamp object"""
        s = str(timestamp)
        return s[0:4] + s[5:7] + s[8:10]

    url = "http://mis.nyiso.com/public/csv/" + data_type + "/"
    url += _to_yyyymmdd(month) + data_type
    if zone is not None:
        url += "_zone"
    url += "_csv.zip"
    return url
e3f53df7b9136aaa00796247989e3cd5b01d1216
692,668
def np_chunk(tree):
    """
    Return a list of all noun phrase chunks in the sentence tree.

    A noun phrase chunk is defined as any subtree of the sentence whose label
    is "NP" that does not itself contain any other noun phrases as subtrees.

    Fixed: the previous implementation returned *every* NP subtree, including
    those with nested NPs, contradicting the documented contract.
    """
    chunks = []
    for subtree in tree.subtrees():
        if subtree.label() != 'NP':
            continue
        # subtrees() yields the node itself first, so exclude that occurrence
        # when probing for a nested NP.
        contains_np = any(
            t.label() == 'NP' for t in subtree.subtrees() if t is not subtree
        )
        if not contains_np:
            chunks.append(subtree)
    return chunks
9f80a677fe62377191985ed0690cf86f196721d6
692,671
import binascii


def bin2macaddress(data):
    """Convert a byte string into a colon-separated MAC address string."""
    hex_repr = binascii.b2a_hex(data)
    octets = [hex_repr[i:i + 2] for i in range(0, len(hex_repr), 2)]
    return b":".join(octets).decode()
2c3f6989810adb6257cd169b817ab3d06f58feff
692,675
from typing import List
import random


def create_session(attendees: List[list], room_size: int) -> dict:
    """
    Randomly distribute attendees into rooms of a fixed size.

    Parameters:
        attendees : List - attendees for the session (not modified)
        room_size : int - number of people per room

    Return:
        dict mapping "Room N" to its list of attendees; the last room is
        padded with "" entries when attendees don't divide evenly.
    """
    pool = attendees[:]
    random.shuffle(pool)
    session = {}
    room_num = 1
    while pool:
        occupants = [pool.pop() if pool else "" for _ in range(room_size)]
        session[f"Room {room_num}"] = occupants
        room_num += 1
    return session
b7431afc17e95308e9d48c8f68d92d5f2564bb70
692,676
def match_args(macro, args):
    """Pair the macro's declared argument names with the supplied values."""
    if 'args' not in macro:
        return {}
    return dict(zip(macro['args'], args))
1360771bb397b62f849a3227ffcf82f78302143c
692,678
def geometric_pmi_score(pdict, wlist1, wlist2):
    """
    Calculate geometric mean of PMI over all word pairs in two word lists,
    given pre-computed PMI dictionary
    - If geometric PMI is undefined, return -inf
    - The geometric mean is undefined if:
        - Any of the PMIs are negative
        - None of the word pairs have a defined PMI
    """
    # Running product of the PMIs of all defined pairs.
    # None  -> no defined pair seen yet
    # -inf  -> a non-positive PMI was encountered (score undefined)
    product_pmi = None
    for word1 in wlist1:
        for word2 in wlist2:
            # Enforce alphabetical order in pair (pdict stores pairs sorted)
            pair = tuple(sorted([word1, word2]))
            wi, wj = pair
            if wi in pdict and wj in pdict[wi]:
                if product_pmi is None:
                    product_pmi = 1
                pmi = pdict[wi][wj]
                # Check if PMI is negative
                if pmi > 0:
                    product_pmi *= pmi
                else:
                    product_pmi = float("-inf")
                    break
        # If PMI is negative, break out of the loop completely
        if product_pmi == float("-inf"):
            break
    if product_pmi is None:
        # None of the word pairs had a defined PMI
        return float("-inf")
    elif product_pmi == float("-inf"):
        # At least one word pair had a negative PMI
        return float("-inf")
    else:
        # NOTE(review): the exponent uses the full |wlist1|*|wlist2| pair count
        # even when some pairs had no PMI entry — confirm this is intended.
        return product_pmi ** (1/len(wlist1)/len(wlist2))
b513f0d643c062d91010b2c8d8c666373b0c86ad
692,680
def _multicast_groups(subpages, metadata): """Order the subpages of a multicast page into groups according to hints given in the metadata. Arguments: subpages(list): list of subpages of the multicast page metadata(dict): the metadata dictionary of the multicast page. The only control parameters so far is: 'items_per_page' Returns: a list of lists where each list represents one group of subpages that is to appear on one output page. """ n = metadata.get('MC_PAGINATION', 1) if n < 1000: # interpret pagination parameter as number of items per page return [subpages[k:k + n] for k in range(0, len(subpages), n)] else: # interpret pagination parameter as size, i.e. number of characters # per page groups = [] group = [] cnt = 0 for sp in subpages: size = len(sp) if len(group) == 0 or size + cnt <= n: group.append(sp) cnt += size else: groups.append(group) group = [sp] cnt = size return groups
89032beaa5f3bd8a6a54db8b6edc2d6c7ef38a04
692,681
import re


def find_phone_number(text):
    """
    Spain Mobile numbers have ten digit. I will write that pattern below.

    Parameters
    ----------
    text: str
        Text selected to apply transformation

    Examples:
    ---------
    ```python
    find_phone_number("698887776 is a phone number of Mark from 210,North Avenue")
    >>> ['698887776']
    ```

    Fixed: re.findall with capturing groups returned tuples of the groups
    (e.g. ('', '6', '6 ')), not the matched numbers. Non-capturing groups and
    finditer now yield the full matched text, trimmed of stray separators.
    """
    pattern = re.compile(r"(?:\+34|0034|34)?[ -]*[67][ -]*(?:[0-9][ -]*){8}")
    # The trailing [ -]* can swallow the separator after the number; strip it.
    return [m.group(0).strip(" -") for m in pattern.finditer(text)]
807536949e0fefe6bd0ab5c3c70b14903c19a078
692,682
def new_list_with_dict_ids(old: list):
    """Build a list of ``{"id": ...}`` dicts from the dict elements of *old*.

    Non-dict elements are skipped; dicts without an "id" key yield
    ``{"id": None}``.

    :param old: The initial list with dicts.
    :returns: New list with dicts that contain only id.
    :rtype: list
    """
    return [{"id": item.get("id")} for item in old if isinstance(item, dict)]
12e0d8f46230231e72c8b091801749365071e87d
692,686
def str_to_bool(val: str) -> bool:
    """Interpret a string as a boolean the way a human would.

    Case-insensitively maps "y"/"yes"/"yep"/"yup"/"t"/"true"/"on"/"enable"/
    "enabled"/"1" to True and "n"/"no"/"f"/"false"/"off"/"disable"/
    "disabled"/"0" to False; anything else raises ValueError."""
    truthy = {"y", "yes", "yep", "yup", "t", "true", "on", "enable", "enabled", "1"}
    falsy = {"n", "no", "f", "false", "off", "disable", "disabled", "0"}
    lowered = val.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise ValueError(f"Invalid truth value {lowered}")
cbd1ecc22a96ff8f80c64ef47d7593877a9da1c1
692,688
def number_of_yang_modules_that_passed_compilation(in_dict: dict, position: int, compilation_condition: str):
    """
    Count the modules whose compilation status equals *compilation_condition*.

    Arguments:
        :param in_dict            (dict) key: yang-model, value: list of compilation results
        :param position           (int) 1-based position in the result list holding the status
        :param compilation_condition (str) result looked for - PASSED, PASSED WITH WARNINGS, FAILED
    :return: number of YANG models meeting the condition
    """
    return sum(
        1 for results in in_dict.values()
        if results[position - 1] == compilation_condition
    )
d40d10a5601589518aa179822d851628d6b24a0a
692,689
def parse_xgcm_attributes(ds, xc='xt_ocean', xg='xu_ocean', yc='yt_ocean', yg='yu_ocean', zc='st_ocean', zg='sw_ocean'):
    """Attach the axis attributes xgcm needs to recognize the grid.

    Cell-center coordinates get ``axis``; cell-edge coordinates additionally
    get ``c_grid_axis_shift=0.5``. Pass None for any coordinate to skip it.
    """
    for center, edge, axis in ((xc, xg, 'X'), (yc, yg, 'Y'), (zc, zg, 'Z')):
        if center is not None:
            ds[center] = ds[center].assign_attrs(axis=axis)
        if edge is not None:
            ds[edge] = ds[edge].assign_attrs(axis=axis)
            ds[edge] = ds[edge].assign_attrs(c_grid_axis_shift=0.5)
    return ds
c508035b9fcbd8f56ef1c0d3dcd54c42d3804bae
692,690
def name_to_htmltitle(name):
    """
    Return an HTML version of a course name; names longer than 10 characters
    get their spaces replaced by line breaks.

    >>> name_to_htmltitle('123 567 9')
    '123 567 9'
    >>> name_to_htmltitle('123 567 9012 45')
    '123<br>567<br>9012<br>45'
    """
    return name if len(name) <= 10 else name.replace(' ', '<br>')
2705e245d80d436a6dba5b25b217318778d4d194
692,692
def cm(inch: float) -> float:
    """Convert a length in inches to centimetres (1 inch == 2.54 cm)."""
    return 2.54 * inch
6edb993aa1a3cdd8d04ae876f03dec4f9cd60491
692,695
from typing import Iterable
from typing import Optional
from typing import Union
from typing import Counter


def is_valid_token_sequence(tokens: Iterable[str], seq: str, sep: Optional[str] = ';') -> Union[None, bool]:
    """
    Check whether *seq* is a *sep*-separated sequence of unique tokens drawn
    from *tokens*.

    Used to validate OED columns such as ``(Acc|Cond|Loc|Pol|Reins)Peril``
    and ``(Acc|Cond|Loc|Pol|Reins)PerilsCovered``, which must be
    ``;``-separated sequences of OED peril codes, e.g. ``AA1;WTC;WEC``.

    :param tokens: The iterable of valid tokens
    :type tokens: list, tuple, set
    :param seq: The string to be checked
    :type seq: str
    :param sep: (Optional) The separator to use/expect - default is ``;``
    :type sep: str
    :return: True/False for a valid/invalid sequence, or None when the inputs
        are malformed (non-string seq, or non-string/empty tokens)
    :rtype: bool
    """
    malformed = (
        not isinstance(seq, str)
        or any(not isinstance(t, str) for t in tokens)
        or any(len(t) == 0 for t in tokens)
    )
    if malformed:
        return None

    parts = [p for p in seq.split(sep) if p]
    counts = Counter(parts)
    return all(p in tokens for p in parts) and all(c == 1 for c in counts.values())
5668003e8df23229de6e4864289ea0c74d093cfe
692,696
def nodes_create_unwind(labels, property_parameter=None):
    """
    Generate a ``CREATE`` query using ``UNWIND`` for batch creation of nodes::

        UNWIND $props AS properties CREATE (n:Gene) SET n = properties

    Pass the node properties as the query parameter, e.g. with a
    :py:obj:`py2neo.Graph`::

        graph.run(query, props=[{'id': 1}, {'id': 2}, ...])

    :param labels: Labels for the create query.
    :type labels: list[str]
    :param property_parameter: Optional name of the query parameter
        (default 'props').
    :type property_parameter: str
    :return: Query string.
    """
    param = property_parameter or 'props'
    label_str = ":".join(labels)
    return f"UNWIND ${param} AS properties CREATE (n:{label_str}) SET n = properties RETURN count(n) as cnt"
1723f8e84535ba6d3dcc47578a40e3da511ebcea
692,697
def learning_rate_decay(step, init_lr=5e-4, decay_steps=100000, decay_rate=0.1):
    """Continuous (exponential) learning rate decay.

    Computes ``init_lr * decay_rate ** (step / decay_steps)``.

    Args:
        step: int, the global optimization step.
        init_lr: float, the initial learning rate.
        decay_steps: int, steps over which one full decay factor is applied.
        decay_rate: float, multiplicative decay per `decay_steps` steps.

    Returns:
        The learning rate for global step `step`.
    """
    return init_lr * decay_rate ** (step / decay_steps)
ddca7b9a87bc1662c73fafdf26b42fa402a376c3
692,698
def GetPopList(sample_blocks):
    """
    Get the distinct populations present in sample_blocks
    ----------
    sample_blocks : list of [hap_blocks]
        each hap_block is a dictionary with keys 'pop', 'chrom', 'start', 'end'

    Returns
    -------
    poplist : list of str
        list of populations represented in the blocks (order unspecified)
    """
    return list({hap_block['pop'] for blocks in sample_blocks for hap_block in blocks})
5f383177832d602c0fbe5464d9bb3fd672287065
692,699
import pickle


def read_block(file):
    """Read a pickled block from a specified data path.

    Args:
        file (string): file name of a block to read

    Returns:
        The unpickled object.
    """
    # `with` guarantees the handle is closed; the original leaked it.
    with open(file, 'rb') as f:
        return pickle.load(f)
eaa4d2e0e065217f4151bd8babba3dc75cdb80ce
692,703
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """
    Format the error message for when __array_ufunc__ gives up.
    """
    positional = [repr(arg) for arg in inputs]
    keyword = ['{}={!r}'.format(k, v) for k, v in kwargs.items()]
    call_args = ', '.join(positional + keyword)
    # The operands include the inputs plus any `out` arrays.
    affected = inputs + kwargs.get('out', ())
    type_names = ', '.join(repr(type(arg).__name__) for arg in affected)
    return (
        'operand type(s) all returned NotImplemented from '
        '__array_ufunc__({!r}, {!r}, {}): {}'.format(ufunc, method, call_args, type_names)
    )
4620772f4521325e66798c57e501ffd88ab991d1
692,705
def dlog(num, base=2):
    """Return the discrete logarithm of num: the smallest n with num < base**n.

    For the standard base 2, this is the number of bits required to store the
    range 0..num.

    Fixed: the previous list-comprehension scanned only n in range(32) and
    raised IndexError for num >= base**32; this loop handles any magnitude.
    """
    n = 0
    while num >= base ** n:
        n += 1
    return n
55812ba61d432fc36817189f3c3935ef151fcb3b
692,706
def _get_sources_with_sink(node_str, connections): """Returns the source nodes that are connected to the sink node with the given string representation. Args: node_str: a string representation of a PipelineNode connections: a list of PipelineConnection instances Returns: a list of PipelineNodes that are connected to the given sink node. """ return [c.source for c in connections if c.sink.is_same_node_str(node_str)]
e7713889bd88833d6ed0967b3f09199eac5f6df4
692,711
def retrieve_block(model_storage, weight_idx, block_x_idx, block_y_idx, block_size_x, block_size_y):
    """Retrieve one (block_size_x, block_size_y) tile of a padded weight matrix.

    Args:
        model_storage (model_storage): a model's model_storage
        weight_idx (int): weight index
        block_x_idx (int): block index in x-axis
        block_y_idx (int): block index in y-axis
        block_size_x (int): block size in x-axis
        block_size_y (int): block size in y-axis

    Return:
        numpy array: the requested block
    """
    x0 = block_x_idx * block_size_x
    y0 = block_y_idx * block_size_y
    weights = model_storage['weights_padded'][weight_idx]
    return weights[x0:x0 + block_size_x, y0:y0 + block_size_y]
28118eb869fb350397bed0afbea402a9375db834
692,717
def get_module_name(name: str) -> str:
    """Get the cog name from the module path (the part after the last dot)."""
    return name.rsplit(".", 1)[-1]
dda70b4c2a6f1aa2666c0f1df01c6aa6aabde847
692,720
def assign_staging_jobs_for_missing_clusters(
    support_and_staging_matrix_jobs, prod_hub_matrix_jobs
):
    """Ensure that for each cluster listed in prod_hub_matrix_jobs, there is an
    associated job in support_and_staging_matrix_jobs. This is our last-hope
    catch-all to ensure there are no prod hub jobs trying to run without an
    associated support/staging job.

    Args:
        support_and_staging_matrix_jobs (list[dict]): A list of dictionaries
            representing jobs to upgrade the support chart and staging hub on
            clusters that require it. NOTE: this list is mutated in place and
            also returned.
        prod_hub_matrix_jobs (list[dict]): A list of dictionaries representing
            jobs to upgrade production hubs that require it.

    Returns:
        support_and_staging_matrix_jobs (list[dict]): Updated to ensure any
            clusters missing present in prod_hub_matrix_jobs but missing from
            support_and_staging_matrix_jobs now have an associated
            support/staging job.
    """
    # Clusters referenced by each job list, keyed on "cluster_name".
    prod_hub_clusters = {job["cluster_name"] for job in prod_hub_matrix_jobs}
    support_staging_clusters = {
        job["cluster_name"] for job in support_and_staging_matrix_jobs
    }
    missing_clusters = prod_hub_clusters.difference(support_staging_clusters)

    if missing_clusters:
        # Generate support/staging jobs for clusters that don't have them but do have
        # prod hub jobs. We assume they are missing because neither the support chart
        # nor staging hub needed an upgrade. We set upgrade_support to False. However,
        # if prod hubs need upgrading, then we should upgrade staging so set that to
        # True.
        for missing_cluster in missing_clusters:
            # Borrow the provider from any prod hub job on the same cluster
            # (None if, unexpectedly, none is found).
            provider = next(
                (
                    hub["provider"]
                    for hub in prod_hub_matrix_jobs
                    if hub["cluster_name"] == missing_cluster
                ),
                None,
            )
            # All prod hubs on this cluster, listed in the redeploy reason.
            prod_hubs = [
                hub["hub_name"]
                for hub in prod_hub_matrix_jobs
                if hub["cluster_name"] == missing_cluster
            ]

            new_job = {
                "cluster_name": missing_cluster,
                "provider": provider,
                "upgrade_support": False,
                "reason_for_support_redeploy": "",
                "upgrade_staging": True,
                "reason_for_staging_redeploy": (
                    "Following prod hubs require redeploy: " + ", ".join(prod_hubs)
                ),
            }
            support_and_staging_matrix_jobs.append(new_job)

    return support_and_staging_matrix_jobs
3e8809272046dde50cf148dd6dd9ada3e63df9d8
692,721
from datetime import datetime


def convert_time(timestamp):
    """Convert a POSIX timestamp to a naive UTC datetime.

    :param timestamp: seconds since the Unix epoch (int or float)
    :return: naive ``datetime`` representing that instant in UTC
    """
    # NOTE(review): datetime.utcfromtimestamp is deprecated since Python 3.12.
    # Switching to datetime.fromtimestamp(timestamp, tz=timezone.utc) would
    # return an *aware* datetime and could affect callers — confirm first.
    return datetime.utcfromtimestamp(timestamp)
38ce9dce7cbebf99838f422b282629bfe7afc10d
692,729
def input_str(option1=None, option2=None):
    """Returns a string variable taken from the user, otherwise displays an
    error message.
    If option1 and option2 are specified, the function returns one of those
    two options, otherwise it displays an error message.

    Parameters:
        option1: First option to choose of type string (expected uppercase,
            since user input is upper-cased before comparison).
        option2: Second option to choose of type string.
    """
    # Loop until the user provides acceptable input.
    while True:
        input_data = input()
        if option1 is None and option2 is None:
            # Free-form mode: accept any input that is alphabetic once
            # internal whitespace is removed (e.g. "John Smith").
            if ''.join(input_data.split()).isalpha():
                break
            else:
                print("You put invalid data, please try again")
        if option1 is not None or option2 is not None:
            # Choice mode: normalize, then require an exact match with one
            # of the two options.
            input_data = input_data.upper().strip()
            if input_data.isalpha() and (input_data == option1 or input_data == option2):
                break
            else:
                print("You put invalid data, please try again")
    return input_data
4c05036fc47bc8524704cd9773e1cc93afc15770
692,741
def in_ranges(value, ranges):
    """Return True if *value* lies within every ``(low, high)`` pair in *ranges*
    (inclusive); vacuously True for an empty list."""
    for low, high in ranges:
        if not (low <= value <= high):
            return False
    return True
91a9b8fb3d225438ddcb21f3e9b4d981edeed29c
692,745
def format_size(size):
    """
    Render a byte count as a human-readable string with two decimals,
    scaling through B, KB, MB and capping at GB.
    """
    for unit in ('B', 'KB', 'MB'):
        if size < 1024:
            return f"{size:.2f} {unit}"
        size = size / 1024
    return f"{size:.2f} GB"
90bc93c9170fc89f7ac9e66d43eb28c04366f1c6
692,754
def recipelist(recipebook):
    """Render the recipe book as a numbered list for command-line output."""
    lines = ["Recipes:"]
    for index, recipe in enumerate(recipebook.recipes):
        lines.append(f"\n{index}. {recipe.name}")
    return "".join(lines)
e8b05d57a2219fc79c8d25f9d4d6d18780b79574
692,755
def _start_of_option(value: str) -> bool: """Check if the value looks like the start of an option.""" return value[0] == "-" if value else False
0430d39de6260d3cc3d9e9725ef1cc4df8d202cb
692,757
def parse_bibfile_to_dict(bib_path):
    """Read a bibtex file into a dictionary keyed by lower-cased cite key.

    Returned dictionary has the form:
        {'alberga2018prediction': ['@article{Alberga2018prediction,', 'title={...}', ...],
         'hawkins2007comparison': [...],
         ...}
    """
    entries_by_key = {}
    current_key = None
    current_entries = []
    with open(bib_path, 'r') as handle:
        for raw_line in handle:
            stripped = raw_line.strip()
            if not stripped:
                continue
            if stripped.startswith('@'):
                # New entry: flush the previous one before starting afresh.
                if current_key is not None:
                    entries_by_key[current_key] = current_entries
                    current_entries = []
                brace = stripped.find('{')
                comma = stripped.find(',')
                current_key = stripped[brace + 1:comma].lower()
            current_entries.append(stripped)
    if current_key is not None:
        entries_by_key[current_key] = current_entries
    return entries_by_key
1ef3500208432d81ba4e3b8de34ef829c8b15778
692,758
def fmt(fmt_str):
    """
    Turn a format string into a function that applies its arguments to it.

    :param fmt_str: string appropriate for use with ``str.format``
    :return: the bound ``fmt_str.format`` method
    """
    formatter = fmt_str.format
    return formatter
64cd64a036372d754c46b625ca901665d4dd6d16
692,760
def get_post_gallery_images(context):
    """
    Returns the post's related images whose ``img_type`` is 'gallery',
    or an empty list when the context has no post (KeyError) or the post
    has no image relation (AttributeError).
    """
    # Fixed: the bare `except:` swallowed *every* exception (including
    # database errors and KeyboardInterrupt); catch only the expected ones.
    try:
        post = context['object']
        return post.image_set.filter(img_type='gallery')
    except (KeyError, AttributeError):
        return []
d9ba16e99d55d893f696bfa92063536d8a402b9b
692,761
def my_add(x, y):
    """Return the sum of the two operands."""
    total = x + y
    return total
983629b3c155259269a496100973bc993d8c6724
692,763
def dyck_words_str(n):
    """
    Yield every word of n pairs of correctly matched parentheses,
    extending with '(' before ')' at each step.
    """
    def extend(word):
        opens = word.count("(")
        closes = word.count(")")
        # Prune: too long, or a ')' appeared without a matching '('.
        if len(word) > 2 * n or closes > opens:
            return
        if opens == closes == n:
            yield word
        else:
            yield from extend(word + "(")
            yield from extend(word + ")")
    yield from extend("")
4f70db40656ec7505f695918eb944a3bba511087
692,765
def map_from_mi(mi, lfrom, lto):
    """Build a dict mapping values of MultiIndex level *lfrom* to the
    corresponding values of level *lto*."""
    source = mi.get_level_values(lfrom)
    target = mi.get_level_values(lto)
    return dict(zip(source, target))
ff66f03a684a659d1e22135dd268102991aea3a3
692,770
def get_donottrack(request):
    """
    Returns ``True`` if the request's ``HTTP_DNT`` header is ``'1'``,
    ``False`` otherwise (including when the header is absent).
    """
    dnt = request.META.get('HTTP_DNT')
    return dnt == '1'
0f490a13bf48569022f276b222b1aa83adb78e41
692,776
def getCbsdsNotPartOfPpaCluster(cbsds, ppa_record):
    """Returns the CBSDs that are not part of a PPA cluster list.

    Args:
        cbsds : List of CBSDData objects.
        ppa_record : A PPA record dictionary.

    Returns:
        A list of CBSDs that are not part of the PPA cluster list.
    """
    cluster_ids = ppa_record['ppaInfo']['cbsdReferenceId']
    return [cbsd for cbsd in cbsds if cbsd['id'] not in cluster_ids]
3593ed41851e5d63243dae4665ccf593eccc763e
692,777
def factorial_rec(n):
    """Recursively compute n!; by definition factorial_rec(0) == 1."""
    return 1 if n == 0 else n * factorial_rec(n - 1)
7276f99d82079d8b65e657813c6855d2f5290cd3
692,778