content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def normalize(vector, size):
    """Normalize a term-weight vector in place by the document length.

    Each value in *vector* is divided by *size*; the (mutated) dict is
    returned for convenience.
    """
    scale = float(size)
    for term in vector:
        vector[term] = vector[term] / scale
    return vector
ca8f8827c0c70283d38e7caa8786e171ef8e40bc
689,745
import pickle


def load_pkl(path: str, load_mode: str = 'rb'):
    """Read a pickle file.

    The previous version never closed the file handle it opened; a
    ``with`` block guarantees it is closed even when unpickling fails.

    :param path: str, filepath to read
    :param load_mode: str, read mode
    :return: contents of the pickle file
    """
    with open(path, load_mode) as file_handle:
        return pickle.load(file_handle)
14397cc224f73ba917b9be80f79a2d98df65fcae
689,751
def first_neighbours_last(batches, current_batch_idx, nb_left, nb_right):
    """Build a navigation sublist from a large batch sequence.

    The result is ``[first, gap?, left neighbours, current, right
    neighbours, gap?, last]`` where ``None`` marks a gap of skipped
    batches. Used to display batch links for a large table.

    :param batches: a large sequence (may be real batches as well)
    :param current_batch_idx: index of the current batch or item
    :param nb_left: number of neighbours before the current batch
    :param nb_right: number of neighbours after the current batch
    :raises AssertionError: when the index is out of range or a
        neighbour count is negative.
    """
    result = []
    first_idx = 0
    last_idx = len(batches) - 1
    assert 0 <= current_batch_idx <= last_idx
    assert nb_left >= 0 and nb_right >= 0
    prev_idx = current_batch_idx - nb_left
    next_idx = current_batch_idx + 1
    # Leading batch, plus a gap marker when the neighbours don't reach it.
    if first_idx < current_batch_idx:
        result.append(batches[first_idx])
    if first_idx + 1 < prev_idx:
        result.append(None)
    # Left neighbours of the current batch.
    for idx in range(prev_idx, prev_idx + nb_left):
        if first_idx < idx:
            result.append(batches[idx])
    # The current batch itself.
    result.append(batches[current_batch_idx])
    # Right neighbours of the current batch.
    for idx in range(next_idx, next_idx + nb_right):
        if idx < last_idx:
            result.append(batches[idx])
    # Trailing gap marker and the last batch.
    if next_idx + nb_right < last_idx:
        result.append(None)
    if current_batch_idx < last_idx:
        result.append(batches[last_idx])
    return result
472d5bce1d137742f6844222c148d2063306c688
689,752
import csv


def fetch_education_urls(input_file):
    """Given a local CSV file, parse it and return a list of GOV.UK URLs.

    The header row is skipped and the first column of each remaining row
    is returned. The file is opened with ``newline=''`` as the ``csv``
    module documentation requires, so quoted fields containing embedded
    newlines are parsed correctly.

    :param input_file: path to the CSV file
    :return: list of URL strings
    """
    with open(input_file, 'r', newline='') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip headers
        return [row[0] for row in reader]
b171eb2b961c68752bffc7acc2ae028fc07e82db
689,758
def createPointWkt(coords):
    """Create WKT POINT string.

    Args:
        coords (list): Two item list representing single point coordinate.

    Returns:
        str: WKT POINT string.
    """
    x, y = coords[0], coords[1]
    return 'POINT({} {})'.format(x, y)
13b1a4864bb9cccc56e3c34bc340f1a2f4d4c3ee
689,761
def mu_lambda(E=None, nu=None, lam=None, mu=None):
    """Get Lame's parameters for an isotropic material.

    Exactly two of the four elastic constants must be supplied; the
    remaining pair (mu, lambda) is derived from the standard isotropic
    elasticity relations.

    Args:
        E (float): Young's modulus or elastic modulus.
        nu (float): Poisson's ratio.
        mu (float): Lame's second parameter or shear modulus G.
        lam (float): Lame's first parameter lambda.

    Returns:
        (float, float): mu and lambda

    Raises:
        ValueError: when the number of provided constants is not two.
    """
    provided = sum(arg is not None for arg in (E, nu, lam, mu))
    if provided != 2:
        raise ValueError(
            "Two elastic constants are expected and received"
            + f" {provided} instead."
        )
    if E is not None and nu is not None:
        # (E, nu) pair
        lam = E * nu / (1 + nu) / (1 - 2 * nu)
        mu = E / 2 / (1 + nu)
    elif E is not None:
        if mu is not None:
            # (E, mu) pair
            lam = mu * (E - 2 * mu) / (3 * mu - E)
        elif lam is not None:
            # (E, lam) pair
            R = (E ** 2 + 9 * lam ** 2 + 2 * E * lam) ** 0.5
            mu = (E - 3 * lam + R) / 4
    elif nu is not None:
        if mu is not None:
            # (nu, mu) pair
            lam = 2 * mu * nu / (1 - 2 * nu)
        elif lam is not None:
            # (nu, lam) pair
            mu = lam * (1 - 2 * nu) / (2 * nu)
    return mu, lam
665891693e265a485d2a618e70f4bb01f209b1c1
689,762
import re def _match_regex(pattern: str, path: str) -> bool: """True if `path` matches `pattern`.""" return re.match(pattern, path) is not None
d25fdfc6c87486b081f549e553a09e1d5d19cfdf
689,764
def clean_path(source):
    """Normalize source.file_name by replacing backslashes with slashes."""
    normalized = source.file_name.replace('\\', '/')
    source.file_name = normalized
    return source
eb9cef7fd346b79630fce9d09c3748ca5cacbd4d
689,766
import re


def remove_non_alphanumeric_symbols(s):
    """Replace every non-word character in *s* with ``_`` so the result
    can be used as an attribute name."""
    sanitized = re.sub(r"\W", "_", s)
    return sanitized
01e2db12df4baaee11fbdb1ec2f6684b4d1a3f0c
689,768
def build_falloff(parameters, falloff_function):
    """Creates falloff reaction Troe parameter string

    Parameters
    ----------
    parameters : numpy.ndarray
        Array of falloff parameters; length varies based on ``falloff_function``
    falloff_function : {'Troe', 'SRI'}
        Type of falloff function

    Returns
    -------
    falloff_string : str
        String of falloff parameters
    """
    p = parameters
    if falloff_function == 'Troe':
        return f'TROE / {p[0]} {p[1]} {p[2]} {p[3]} /\n'
    if falloff_function == 'SRI':
        return f'SRI / {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} /\n'
    raise NotImplementedError(f'Falloff function not supported: {falloff_function}')
c7937f524f720e903998fcf38f6f5bed9e361d94
689,770
import time
import hmac
import hashlib


def verify_request(
    *, timestamp: str, signature: str, request_data: bytes, signing_secret: str
) -> bool:
    """Validate a Slack request signature.

    Implements the process described in
    https://api.slack.com/docs/verifying-requests-from-slack using the
    approach from https://github.com/slackapi/python-slack-events-api

    Parameters
    ----------
    timestamp: str
        originates from headers['X-Slack-Request-Timestamp']
    signature: str
        originates from headers['X-Slack-Signature']
    request_data: bytes
        The originating request byte-stream; if using Flask, then
        originates from request.get_data()
    signing_secret : str
        originates from the App config

    Returns
    -------
    bool
        True if signature is validated, False otherwise
    """
    # Reject timestamps more than five minutes from local time:
    # could be a replay attack, so ignore it.
    if abs(time.time() - int(timestamp)) > 60 * 5:
        return False
    basestring = str.encode('v0:' + str(timestamp) + ':') + request_data
    digest = hmac.new(str.encode(signing_secret), basestring, hashlib.sha256).hexdigest()
    expected = 'v0=' + digest
    # Constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(expected, signature)
e3901cf8abc467e2203d8faca638754b6042bdf6
689,771
from functools import reduce


def bytes_to_int(bindata):
    """Convert a sequence of bytes into a big-endian integer.

    The original applied ``ord`` to every item, which fails on Python-3
    ``bytes``/``bytearray`` whose items are already ints. This version
    accepts both those and the legacy str-of-8-bit-chars input.

    :param bindata: bytes, bytearray, or str of 8-bit characters
    :return: the big-endian integer value (0 for empty input)
    """
    values = (ord(b) if isinstance(b, str) else b for b in bindata)
    return reduce(lambda acc, b: (acc << 8) | b, values, 0)
9ab8e1c2f66d0d385cf394c9a674e09d9b8bbded
689,773
import re


def word_filter(word):
    """Filter used for deleting noisy words in changed code.

    A word is kept only when, after stripping everything but digits,
    letters and '_', it is not all-digits and is longer than two chars;
    hex literals and words containing '=', '/', '.', '$' are dropped.

    Args:
        word: the token to test.

    Returns:
        True to keep the word, False to filter it out.
    """
    if word.startswith('0x'):
        return False
    for noisy in ('=', '/', '.', '$'):
        if noisy in word:
            return False
    cleaned = re.sub("[^0-9A-Za-z_]", "", word)
    if cleaned.isdigit():
        return False
    return len(cleaned) > 2
c29e80c7e6839a576b95ee99b18e81a31b95a020
689,774
import json


def get(event, context):
    """Handle the GET request and echo the full lambda request event back."""
    body = json.dumps(event)
    return {"statusCode": 200, "body": body}
f55c3c8ae3387a8019d601fc0c597840cf73c929
689,781
import importlib


def import_model(type_module, type_name):
    """Import and return attribute `type_name` from module `type_module`.

    :param str type_module:
    :param str type_name:
    :return: Model type
    :rtype: type
    :raise ImportError: When the model cannot be found.
    """
    try:
        mod = importlib.import_module(type_module)
    except ImportError as e:
        # Chain the original failure so the full cause shows in tracebacks
        # (PEP 3134); the previous version discarded it.
        raise ImportError("Import {}.{} failed; {}".format(type_module, type_name, e)) from e
    try:
        return getattr(mod, type_name)
    except AttributeError as e:
        raise ImportError("Import {0}.{1} failed; No class named {1}".format(type_module, type_name)) from e
0cbda14880f73854edec6d67445aee9b5528e07e
689,782
import re


def float_from_str(text):
    """Remove uncertainty brackets (e.g. ``"1.23(4)"``) and return the float.

    Uses a raw-string regex; the previous non-raw literal ``"\\(.+\\)"``
    relies on invalid string escapes and triggers a DeprecationWarning
    on modern Pythons.

    :param text: numeric string, optionally with a bracketed uncertainty
    :return: the parsed float
    """
    return float(re.sub(r"\(.+\)", "", text))
4567abb8ba6c52efa3b37bddcb17dc351ba37dcd
689,785
import string


def capwords(value, sep=None):
    """Split *value* with str.split(), capitalize each word, and join
    them back with str.join().

    If *sep* is absent or None, runs of whitespace are collapsed to a
    single space and leading/trailing whitespace is removed; otherwise
    *sep* is used to split and join the words.
    """
    capitalized = string.capwords(value, sep=sep)
    return capitalized
e481cab8af670b41130a1728869fa424e07ed5b8
689,786
def filter_OD(origins, destinations):
    """Pair origin/destination lists (1D notation) into OD tuples.

    Returns [] when the two lists have different lengths.
    """
    if len(origins) != len(destinations):
        return []
    return list(zip(origins, destinations))
b46f50a3fcc3a602116eda549eb2fdec31c95bcb
689,795
from typing import Tuple


def do_instruction(current_position: int, accumulator_value: int,
                   instruction: int, instruction_value: int) -> Tuple[int, int]:
    """Perform one instruction: 0 = nop, 1 = acc, 2 = jmp.

    Any other opcode leaves both position and accumulator untouched
    (preserved from the original behavior).
    """
    if instruction == 0:
        return (current_position + 1, accumulator_value)
    if instruction == 1:
        return (current_position + 1, accumulator_value + instruction_value)
    if instruction == 2:
        return (current_position + instruction_value, accumulator_value)
    return (current_position, accumulator_value)
5fcdd85e875fbd65a295f4c25e5f14cb7a786d4f
689,801
def get_vertex_names_from_indices(mesh, indices):
    """Returns a list of vertex names from a given list of face indices

    :param mesh: str
    :param indices: list(int)
    :return: list(str)
    """
    return ['{}.vtx[{}]'.format(mesh, index) for index in indices]
19ba81833a825310ae6e8958500ab60fe4ea3ac9
689,803
def add_articles_from_xml(thread, xml_root):
    """Create article dicts from <articles>/<article> XML elements and
    feed each to thread.add_article().

    Returns True if at least one article was added, False otherwise.
    """
    str_fields = ('username', 'link', 'postdate', 'editdate')
    int_fields = ('id', 'numedits')
    added_items = False
    for item in xml_root.find('articles').findall('article'):
        data = {name: item.attrib[name] for name in str_fields}
        for name in int_fields:
            data[name] = int(item.attrib[name])
        thread.add_article(data)
        added_items = True
    return added_items
be7e32fdd4831c101842fb8f939d2ce0ea3942f5
689,809
import base64


def decode(payload):
    """Decode URL-safe Base64 and return the text.

    https://en.wikipedia.org/wiki/Base64#URL_applications
    The URL variant replaces ``+`` and ``/`` with ``-`` and ``_``;
    ``base64.urlsafe_b64decode`` performs exactly the character
    translation the old manual ``replace`` chain did.
    """
    return base64.urlsafe_b64decode(payload).decode()
f272a3b81544a32a07a1c8aecaa66fa39a51f63e
689,811
def BuildInstanceConfigOperationTypeFilter(op_type):
    """Builds the filter for the different instance config operation
    metadata types.

    Returns '' for None; an unrecognized op_type yields None (preserved
    from the original implicit fall-through).
    """
    if op_type is None:
        return ''
    base = ('metadata.@type:type.googleapis.com/'
            'google.spanner.admin.database.v1.')
    suffixes = {
        'INSTANCE_CONFIG_CREATE': 'CreateInstanceConfigMetadata',
        'INSTANCE_CONFIG_UPDATE': 'UpdateInstanceConfigMetadata',
    }
    suffix = suffixes.get(op_type)
    if suffix is None:
        return None
    return base + suffix
dbd633c199c78bd529109c258036095a1e681986
689,817
from datetime import datetime def _parse_publishing_date(string): """ Parses the publishing date string and returns the publishing date as datetime. Input example (without quotes): "Wed, 09 Nov 2016 17:11:56 +0100" """ return datetime.strptime(string,"%a, %d %b %Y %H:%M:%S +0100")
e0ddc6da2a8acb90cceb4649f0d8491531408fcb
689,821
def multiply(k, v1):
    """Returns k*v1 where k is multiplied by each element in v1.

    Args:
        k (float): scale factor.
        v1 (iterable): a vector in iterable form.

    Returns:
        iterable: a list, or a tuple when v1 is exactly a tuple.
    """
    scaled = [k * component for component in v1]
    # exact type check kept deliberately: subclasses still yield a list
    if type(v1) == tuple:
        return tuple(scaled)
    return scaled
6ef0bc8fc0321c803655062c88cc38c23608a068
689,822
import string


def is_valid_sha1(sha1: str) -> bool:
    """True iff sha1 is a valid 40-character SHA1 hex string."""
    if sha1 is None or len(sha1) != 40:
        return False
    return all(ch in string.hexdigits for ch in sha1)
bcf0dc6bb568bbbfaaa0b162ee3d400cfe79ce6f
689,824
def nvmf_subsystem_remove_listener(
        client,
        nqn,
        trtype,
        traddr,
        trsvcid,
        adrfam,
        tgt_name=None):
    """Remove existing listen address from an NVMe-oF subsystem.

    Args:
        nqn: Subsystem NQN.
        trtype: Transport type ("RDMA").
        traddr: Transport address.
        trsvcid: Transport service ID.
        tgt_name: name of the parent NVMe-oF target (optional).
        adrfam: Address family ("IPv4", "IPv6", "IB", or "FC").

    Returns:
        True or False
    """
    address = {
        'trtype': trtype,
        'traddr': traddr,
        'trsvcid': trsvcid,
    }
    if adrfam:
        address['adrfam'] = adrfam
    rpc_params = {'nqn': nqn, 'listen_address': address}
    if tgt_name:
        rpc_params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_remove_listener', rpc_params)
ab3ec836f330a1e67befdbf4cc31f6bda489baa5
689,825
def _is_projective(parse): """ Is the parse tree projective? Returns -------- projective : bool True if a projective tree. """ for m, h in enumerate(parse): for m2, h2 in enumerate(parse): if m2 == m: continue if m < h: if ( m < m2 < h < h2 or m < h2 < h < m2 or m2 < m < h2 < h or h2 < m < m2 < h ): return False if h < m: if ( h < m2 < m < h2 or h < h2 < m < m2 or m2 < h < h2 < m or h2 < h < m2 < m ): return False return True
10032f43b066c60a2754f0b639f2e572d4b0b547
689,826
import torch def safe_power(x, exponent, *, epsilon=1e-6): """ Takes the power of each element in input with exponent and returns a tensor with the result. This is a safer version of ``torch.pow`` (``out = x ** exponent``), which avoids: 1. NaN/imaginary output when ``x < 0`` and exponent has a fractional part In this case, the function returns the signed (negative) magnitude of the complex number. 2. NaN/infinite gradient at ``x = 0`` when exponent has a fractional part In this case, the positions of 0 are added by ``epsilon``, so the gradient is back-propagated as if ``x = epsilon``. However, this function doesn't deal with float overflow, such as 1e10000. Parameters ---------- x : torch.Tensor or float The input base value. exponent : torch.Tensor or float The exponent value. (At least one of ``x`` and ``exponent`` must be a torch.Tensor) epsilon : float A small floating point value to avoid infinite gradient. Default: 1e-6 Returns ------- out : torch.Tensor The output tensor. """ # convert float to scalar torch.Tensor if not torch.is_tensor(x): if not torch.is_tensor(exponent): # both non-tensor scalars x = torch.tensor(x) exponent = torch.tensor(exponent) else: x = torch.tensor(x, dtype=exponent.dtype, device=exponent.device) else: # x is tensor if not torch.is_tensor(exponent): exponent = torch.tensor(exponent, dtype=x.dtype, device=x.device) exp_fractional = torch.floor(exponent) != exponent if not exp_fractional.any(): # no exponent has a fractional part return torch.pow(x, exponent) x, x_lt_0, x_eq_0, exponent, exp_fractional = torch.broadcast_tensors( x, x < 0, x == 0, exponent, exp_fractional) # deal with x = 0 if epsilon != 0: mask = x_eq_0 & exp_fractional if mask.any(): # has zero value x = x.clone() x[mask] += epsilon # deal with x < 0 mask = x_lt_0 & exp_fractional if mask.any(): x = x.masked_scatter(mask, -x[mask]) out = torch.pow(x, exponent) out = out.masked_scatter(mask, -out[mask]) else: out = torch.pow(x, exponent) return out
c384c43482fd9cba4957b115555c58f1c6fa50ce
689,830
def fattr(key, value):
    """Decorator that sets attribute *key* to *value* on the function.

    >>> @fattr('key', 42)
    ... def f():
    ...     pass
    >>> f.key
    42
    """
    def decorate(fn):
        setattr(fn, key, value)
        return fn
    return decorate
a69d8c929cb53022a8c7b563d3a268d880d32974
689,831
import pickle


def unpickle_from_disk(filename):
    """Unpickle an object from disk given its complete filename.

    Prints a message and re-raises if the file could not be loaded.
    """
    # Binary mode is required: text mode can raise EOFError when unpickling.
    try:
        with open(filename, "rb") as pickle_file:
            return pickle.load(pickle_file)
    except UnicodeDecodeError:
        # Python-2 era pickles may need latin1 to decode str payloads.
        with open(filename, "rb") as pickle_file:
            return pickle.load(pickle_file, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', filename, ':', e)
        raise
7b4f1d4d6534c2bdc1c8377198e92441c0c69ef1
689,834
def merge_intervals(intervals):
    """Merge overlapping [start, end] intervals.

    Fixes two defects of the original: it sorted and popped the caller's
    list in place (mutating the argument), and it raised IndexError on an
    empty list. This version leaves the input untouched and returns []
    for [].

    :param intervals: list of [start, end] pairs, or None
    :return: merged list of [start, end] lists (None input returns None)
    """
    if intervals is None:
        return None
    if not intervals:
        return []
    ordered = sorted(intervals, key=lambda iv: iv[0])
    merged = [list(ordered[0])]
    for iv in ordered[1:]:
        if merged[-1][-1] >= iv[0]:
            # overlap: extend the last merged interval
            merged[-1][-1] = max(merged[-1][-1], iv[-1])
        else:
            merged.append(list(iv))
    return merged
609c5d463995c64dd331d58136ef41a8d4bf55cc
689,842
def _check_file(f, columns): """Return shell commands for testing file 'f'.""" # We write information to stdout. It will show up in logs, so that the user # knows what happened if the test fails. return """ echo Testing that {file} has at most {columns} columns... grep -E '^.{{{columns}}}' {path} && err=1 echo """.format(columns=columns, path=f.path, file=f.short_path)
2daad603a054f13e08504a8af17f62fecee49cd9
689,844
def find_if(cond, seq):
    """Return the first x in seq such that cond(x) holds, if there is
    one. Otherwise return None."""
    return next((x for x in seq if cond(x)), None)
0978689c29bc06fb1783083cec7f5e7f87eeb07e
689,846
from typing import OrderedDict def _list_to_dict(items): """ Convert a list of dicts to a dict with the keys & values aggregated >>> _list_to_dict([ ... OrderedDict([('x', 1), ('y', 10)]), ... OrderedDict([('x', 2), ('y', 20)]), ... OrderedDict([('x', 3), ('y', 30)]), ... ]) OrderedDict([('x', [1, 2, 3]), ('y', [10, 20, 30])]) """ d = OrderedDict() for item in items: for k, v in item.items(): if k not in d: d[k] = [] d[k].append(v) return d
64dc70a62e423e664f800a1e31ef2f5252e95265
689,849
def check(product, data):
    """Return True if *product* does not appear in *data*.

    When *data* is exactly a dict, its values are searched; otherwise
    *data* itself is iterated.
    """
    candidates = data.values() if type(data) == dict else data
    return all(product != candidate for candidate in candidates)
6162e986f4e55c6dfaf4b9cf9c1b737aaf311dc7
689,856
def _parse_cal_product(cal_product): """Split `cal_product` into `cal_stream` and `product_type` parts.""" fields = cal_product.rsplit('.', 1) if len(fields) != 2: raise ValueError(f'Calibration product {cal_product} is not in the format ' '<cal_stream>.<product_type>') return fields[0], fields[1]
69734101e3715939d2032892aebfd762e75849d6
689,863
def largest(a, b):
    """Return the larger of the two numbers *a* and *b*."""
    return a if a > b else b
9f48283451944a3f6d748b76368019ca1bcbf3db
689,865
def extend_range(min_max, extend_ratio=.2):
    """Symmetrically extend the range given by the `min_max` pair.

    The new range is 1 + `extend_ratio` times the original span.
    """
    margin = (min_max[1] - min_max[0]) * extend_ratio / 2
    return (min_max[0] - margin, min_max[1] + margin)
16c8ba3b54b885ab546caaf02fc989512d861b12
689,866
def build_risk_dataframe(financial_annual_overview):
    """Build risk dataframe.

    Returns an independent copy of *financial_annual_overview* to serve
    as the base for a risk simulation, leaving the original untouched.

    Args:
        financial_annual_overview (dataframe): An annual overview of
            financial data

    Returns:
        risk_dataframe (dataframe): a copy of the annual overview ready
            for a simulation of risk
    """
    risk_dataframe = financial_annual_overview.copy()
    return risk_dataframe
bcee282dd9efa0b86c4b278725ac58f975a598eb
689,868
from typing import Tuple
import struct


def bytes_to_shortint(byteStream: bytes) -> Tuple[int]:
    """Converts 2 bytes to a short integer (native byte order)."""
    # 'H' guarantees ints are returned, hence the loose Tuple[int] hint
    unpacker = struct.Struct('H')
    return unpacker.unpack(byteStream)
ec5319b8d19c7f3e653349eac31daaf4a30c1c0f
689,870
def make_transparent(img, bg=(255, 255, 255, 255)):
    """Given a PIL image, makes the specified background color transparent.

    Pixels exactly equal to *bg* get alpha 0; the RGBA-converted image
    is returned.
    """
    img = img.convert("RGBA")
    transparent = bg[0:3] + (0,)
    pixels = img.load()
    width, height = img.size
    for row in range(height):
        for col in range(width):
            if pixels[col, row] == bg:
                pixels[col, row] = transparent
    return img
78ee0c3a7e17b131820d710982e17f32b68a0cca
689,872
import pkg_resources


def get_words(list_name: str):
    """Read ``data/<list_name>.txt`` from this package and return the
    words upper-cased and truncated to five characters each."""
    resource_path = "/".join(("data", f"{list_name}.txt"))
    raw = pkg_resources.resource_string(__name__, resource_path)
    lines = [chunk.decode("UTF-8") for chunk in raw.split(b"\n")]
    return [word[:5].upper() for word in lines]
30a31c53aa3fd081be6ee01017f6c6f850765c67
689,875
def tree_prec_to_adj(prec, root=0):
    """Transforms a tree given as predecessor table into adjacency list form

    :param prec: predecessor table representing a tree, prec[u] == v iff
                 u is descendant of v, except for the root where
                 prec[root] == root
    :param root: root vertex of the tree
    :returns: undirected graph in listlist representation
    :complexity: linear
    """
    n = len(prec)
    # seed each adjacency list with the vertex's predecessor
    adjacency = [[prec[u]] for u in range(n)]
    adjacency[root] = []  # the root has no predecessor
    for u in range(n):
        # add descendants
        if u != root:
            adjacency[prec[u]].append(u)
    return adjacency
62065680744c0dcb9086b8898770685533397979
689,876
def minutesToHours(minutes):
    """(number) -> float

    Convert input minutes to hours, rounded to two decimal places.

    >>> minutesToHours(60)
    1.0
    >>> minutesToHours(90)
    1.5
    """
    return round(minutes / 60, 2)
d2dbcba8f3e78fafb84bd9d23f456c005467bca5
689,881
def url_to_be(url):
    """An expectation for checking the current url.

    url is the expected url, which must be an exact match. The returned
    predicate yields True if the driver's url matches, False otherwise.
    """
    def _predicate(driver):
        return driver.current_url == url
    return _predicate
df97a46fc3b2b9702969db8b03fd34892b89df62
689,887
def _get_ordered_keys(rows, column_index): """ Get ordered keys from rows, given the key column index. """ return [r[column_index] for r in rows]
109b1d14ece528b7c08394003945c8a80ff9d335
689,888
def _older_than(number, unit): """ Returns a query item matching messages older than a time period. Args: number (int): The number of units of time of the period. unit (str): The unit of time: "day", "month", or "year". Returns: The query string. """ return f"older_than:{number}{unit[0]}"
f5389e67f5aa973b57187c395a11eaed79b95bef
689,891
import fnmatch
from pathlib import Path


def generate_lists_of_filepaths_and_filenames(input_file_list: list):
    """For a list of added/modified files, generate:

    - a list of unique Paths to cluster folders containing matched files
    - the set of files matching "*/cluster.yaml"
    - the set of files matching "*/*.values.yaml"
    - the set of files matching "*/support.values.yaml"

    Args:
        input_file_list (list[str]): files added or modified in a
            GitHub Pull Request

    Returns:
        (list[Path], set[str], set[str], set[str])
    """
    patterns = ["*/cluster.yaml", "*/*.values.yaml", "*/support.values.yaml"]
    matched = []
    for pattern in patterns:
        matched.extend(fnmatch.filter(input_file_list, pattern))
    # unique parent folders of every matched file
    cluster_filepaths = list({Path(filepath).parent for filepath in matched})
    cluster_files = set(fnmatch.filter(input_file_list, "*/cluster.yaml"))
    values_files = set(fnmatch.filter(input_file_list, "*/*.values.yaml"))
    support_files = set(fnmatch.filter(input_file_list, "*/support.values.yaml"))
    return cluster_filepaths, cluster_files, values_files, support_files
e712c05b0bc28225db88b66a841ce31c6a1c898f
689,892
def handle(string: str) -> str:
    """Reduce a repository URL to its trailing "user/repo" handle.

    >>> handle('https://github.com/user/repo')
    'user/repo'
    >>> handle('user/repo')
    'user/repo'
    >>> handle('')
    ''
    """
    parts = string.split("/")
    if len(parts) < 2:
        return "/".join(parts)
    return "/".join(parts[-2:])
bbd00c63b0a037eda08ce1f4fe3ed97ef8978f35
689,894
def proxy_result_as_dict(obj):
    """Convert an SQLAlchemy proxy result object into a list of plain
    dictionaries (one per row)."""
    return [dict(row.items()) for row in obj]
6e7244fa47553d234fba4568d41110d095330704
689,897
def write_env(env_dict, env_file):
    """Write config vars to file as KEY=VALUE lines.

    :param env_dict: dict of config vars
    :param env_file: output file
    :return: was the write successful?
    """
    lines = ["{}={}".format(key, value) for key, value in env_dict.items()]
    try:
        with open(env_file, 'w') as fh:
            fh.write('\n'.join(lines))
    except IOError:
        return False
    return True
0f752e3966fa5fa9120d74b91643c2b9f9da5704
689,899
def get_image(camera):
    """Captures a single image from the camera and returns it in PIL
    format (the second element of camera.read())."""
    _, frame = camera.read()
    return frame
39897fb0ed6a119eca947dba3528618095b06af1
689,901
def _hashSymOpList(symops): """Return hash value for a sequence of `SymOp` objects. The symops are sorted so the results is independent of symops order. Parameters ---------- symops : sequence The sequence of `SymOp` objects to be hashed Returns ------- int The hash value. """ ssop = sorted(str(o) for o in symops) rv = hash(tuple(ssop)) return rv
a8c8bc12de9cc1f135bcec38e94b45a79eebfe33
689,907
def add_PDF_field_names(equiplist, type="NonEnc"):
    """Map sequential PDF field names to items.

    Takes a list of items and their type and returns a dictionary with
    the items as values and the type followed by a sequential number
    (type0, type1, etc.) as keys — generally used to fill fields in a
    blank PDF.
    """
    # 'type' shadows the builtin, but the name is kept so keyword callers
    # (type=...) continue to work.
    return {"{}{}".format(type, idx): item
            for idx, item in enumerate(equiplist)}
4ec5d29e5f8b6c2f66b83628fcb412c2b92dd1f9
689,909
def sclose(HDR):
    """input: HDR_TYPE HDR
    output: [-1, 0]
    Closes the according file. Returns 0 if successful, -1 otherwise."""
    if HDR.FILE.OPEN == 0:
        return -1
    HDR.FILE.FID.close()
    HDR.FILE.FID = 0
    HDR.FILE.OPEN = 0
    return 0


# End of SCLOSE

###########
# SREAD   #
###########
f1ae32a8a37ceda75b06bbf5482225e9f96e7be1
689,910
import yaml


def read_yaml_file(file_path):
    """Parses yaml.

    :param file_path: path to yaml file as a string
    :returns: deserialized file ({} when the file is empty)
    """
    with open(file_path, 'r') as stream:
        return yaml.safe_load(stream) or {}
922525ed3ef450d2e0bb0e99b1294e81a9ef7e6e
689,914
def byte_to_string(byte):
    """Converts an array of integer byte values into the equivalent
    hex-string form.

    :param byte: The array to process
    :return: The calculated string
    """
    return "".join(format(b, "02x") for b in byte)
e4a38e1cf78d2db8417935d833b7575d9850c639
689,919
import re


def get_indent(str_):
    """Find length of initial whitespace chars in `str_`"""
    # type: (str) -> int
    # '[^\s]|$' always matches (at worst at end-of-string), so the
    # fallback 0 is defensive only.
    match = re.search(r'[^\s]|$', str_)
    return match.start() if match else 0
a9de80043341b062326bfa58322c37100c91aa06
689,921
def get_acph2_m2_min(m1: float) -> float:
    """Get minimum value of m2 (second moment) for ACPH(2) fitting.

    According to [1], M2 has only a lower bound since pow(CV, 2) should
    be greater or equal to 0.5. If m1 < 0, `ValueError` is raised.

    The old error message claimed "Expected m1 > 0", contradicting the
    `m1 < 0` check (m1 == 0 is accepted); the message now matches.

    Parameters
    ----------
    m1 : float

    Returns
    -------
    m2_min : float
        Minimum eligible value of the second moment.
    """
    if m1 < 0:
        raise ValueError(f"Expected m1 >= 0, but m1 = {m1}")
    return 1.5 * m1 ** 2
e2f0982755fb09a51a1db352c92b21bfd2058e03
689,922
def clamp(value, lower=None, upper=None):
    """Returns value no lower than lower and no greater than upper.

    Use None to clamp in one direction only.
    """
    if lower is not None and value < lower:
        value = lower
    if upper is not None and value > upper:
        value = upper
    return value
c49e4b82296ea511e6eabe1daf85f769e8202130
689,923
def _finish_plot(ax, names, legend_loc=None, no_info_message="No Information"): """show a message in the axes if there is no data (names is empty) optionally add a legend return Fase if names is empty, True otherwise""" if( not names ): ax.text(0.5,0.5, no_info_message, fontweight='bold', va='center', ha='center', transform=ax.transAxes) return False if( legend_loc is not None ): ax.legend(names, loc=legend_loc) return True
4aaf0c0d197d12086d6dae6f69bb6733fc674751
689,924
import json
import fnmatch
from pathlib import Path


def cmpmanifests(manifest_path_1, manifest_path_2, patterns=None, ignore=None):
    """Bit-accuracy test between two manifests of format
    {filepath: {md5, size_st}}.

    Files from manifest 1 are matched against *patterns* (default
    everything), skipping names matching *ignore*, and compared by md5
    to manifest 2.

    Returns a dict with "mismatch", "match", "only_in_1" and "errors"
    lists of Paths ("errors" is always empty here; kept for interface
    parity with callers).
    """
    with manifest_path_1.open() as fh:
        manifest_1 = json.load(fh)
    with manifest_path_2.open() as fh:
        manifest_2 = json.load(fh)
    patterns = patterns or ['*']
    ignore = ignore or []
    mismatch = set()   # present in both, different md5
    match = set()      # present in both, same md5
    only_in_1 = set()  # present only in manifest 1
    errors = set()     # access errors (none produced here)
    for pattern in patterns:
        glob = f'*{pattern}'
        for path_str, meta in manifest_1.items():
            if not fnmatch.fnmatch(path_str, glob):
                continue
            path = Path(path_str)
            if any(fnmatch.fnmatch(path.name, pat) for pat in ignore):
                continue
            if path_str not in manifest_2:
                only_in_1.add(path)
            elif meta['md5'] == manifest_2[path_str]['md5']:
                match.add(path)
            else:
                mismatch.add(path)
    return {
        "mismatch": list(mismatch),
        "match": list(match),
        "only_in_1": list(only_in_1),
        "errors": list(errors),
    }
7cb77a4ed39ce04064e3bc832ef249a490d16500
689,929
import random


def rand_uniform(a, b):
    """Returns a sample from a uniform [a, b] distribution."""
    return a + (b - a) * random.random()
9d61bd4577f9a93832c0002f014309250279b84f
689,930
from typing import Iterator
from typing import ByteString
from typing import Any
from typing import Union


def hash_a_byte_str_iterator(
    bytes_iterator: Iterator[ByteString], hasher: Any, as_hex_str: bool = False
) -> Union[ByteString, str]:
    """Feed every chunk of *bytes_iterator* into *hasher* and return the digest.

    https://stackoverflow.com/a/3431835/105844

    Parameters
    ----------
    bytes_iterator : Iterator[ByteString]
        Iterator yielding the binary chunks to hash.
    hasher : Any
        A hash object from `hashlib` (anything with update/digest/hexdigest).
    as_hex_str : {'False','True'}, optional
        Return a hexadecimal string instead of raw bytes, by default False.

    Returns
    -------
    Union[ByteString, str]
        The digest, as bytes or as a hexadecimal string.
    """
    for chunk in bytes_iterator:
        hasher.update(chunk)
    if as_hex_str:
        return hasher.hexdigest()
    return hasher.digest()
d1e9814d30e78b4c89484a2c11296c233a3c6d92
689,935
def recursive_config_join(config1: dict, config2: dict) -> dict:
    """Merge *config2* into *config1* in place, recursing into nested dicts.

    Keys already present in *config1* win; keys missing from *config1*
    are copied from *config2*.  Returns the mutated *config1*.
    """
    for key, other_value in config2.items():
        if key not in config1:
            config1[key] = other_value
        else:
            mine = config1[key]
            if isinstance(mine, dict) and isinstance(other_value, dict):
                config1[key] = recursive_config_join(mine, other_value)
    return config1
64dc0bbcebcf20ba913828d05e2004f461909b8f
689,939
def iterativeFactorial(num):
    """Return num! (the factorial of num) computed iteratively.

    Assumes *num* is a positive int; any num <= 0 yields 1.
    """
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
415234f726443a385b4d65e68fb36e7c52e9b042
689,941
def remove_code_parameter_from_uri(url):
    """
    Drop the "code" parameter appended by the first ORCID call, if present,
    then trim any trailing '&' and '/?' characters from what remains.
    """
    base = url.split("code")[0]
    return base.strip("&").strip("/?")
da9dc972ead23886b7f20d6d04ff4afa0a2dce99
689,942
from pathlib import Path
from typing import List
from typing import Dict
import time


def get_dir_data(path: Path) -> List[Dict[str, str]]:
    """Returns list of files and folders in given directory sorted by type
    and by name.

    Parameters
    ----------
    path : Path
        Path to directory which will be explored.

    Returns
    -------
    List[Dict[str, str]]
        One dict per entry with keys: name, type ("file"/"folder"/"unknown"),
        and time (ctime-formatted last modification time).
    """
    def _classify(entry: Path) -> str:
        # One label per entry; anything that is neither dir nor regular
        # file (e.g. broken symlink) is "unknown".
        if entry.is_dir():
            return "folder"
        return "file" if entry.is_file() else "unknown"

    entries = [
        {
            "name": entry.name,
            "type": _classify(entry),
            "time": time.ctime(entry.stat().st_mtime),
        }
        for entry in path.iterdir()
    ]
    return sorted(entries, key=lambda item: (item["type"], item["name"]))
e1f37b8e968c1b04668c830c0385a10d9513fc2a
689,947
def decide_compiler(config):
    """Pick which compiler to use.

    If the configured compiler name contains 'clang', honour the
    configuration; otherwise fall back to the default 'clang++'.
    """
    return config if 'clang' in config else 'clang++'
19510e4c646c1c392d6a66f2638cbc1fa8242735
689,949
def map_to_45(x):
    """Linearly map *x* from the input range [1, 5] onto [1, 4.5].

    Two-point line formula:
    output = out_start + (out_end - out_start) / (in_end - in_start) * (x - in_start)
    with any x >= 5 clamped to the top of the output range (4.5).
    """
    in_start, in_end = 1, 5
    out_start, out_end = 1, 4.5
    if x >= in_end:
        return out_end
    slope = (out_end - out_start) / (in_end - in_start)
    return out_start + slope * (x - in_start)
0b3c62dfb99d24ee8a85e9ff7c621676db530e57
689,952
import struct


def read_unsigned_var_int(file_obj):
    """Decode one unsigned variable-length integer (varint) from *file_obj*.

    Bytes are read one at a time; the low 7 bits of each byte carry
    payload (least-significant group first) and the high bit flags that
    another byte follows.
    """
    value = 0
    shift = 0
    while True:
        (byte,) = struct.unpack(b"<B", file_obj.read(1))
        value |= (byte & 0x7F) << shift
        if not (byte & 0x80):
            return value
        shift += 7
a69ccd69907803109a98672bac4f26a738fe00aa
689,957
def phrase(term: str) -> str:
    """
    Wrap words in double quotes so the query matches the exact phrase.

    :param term: A phrase that appears in that exact form. (e.q. phrase
        "Chicago Bulls" vs. words "Chicago" "Bulls")
    :return: String in the format google understands
    """
    return f'"{term}"'
8a327ca9dc9b223e4ba9adbba8d7f4ed85d7db68
689,958
def clear_tier_dropdown(_):
    """Reset the tier dropdown whenever a new cluster comes into focus.

    Parameters
    ----------
    _ : str
        Reactive trigger for the process; the value itself is ignored.

    Returns
    -------
    str
        Always the empty string (an empty dropdown selection).
    """
    return ''
07fd797d646e00b3712764bddf414b946c0b8365
689,967
import json


def format_report(jsn):
    """Pretty-print a JSON report with indentation and sorted keys.

    Accepts ``str`` or ``bytes`` (decoded as UTF-8).  The JSON comes from
    the browser/user, so invalid input must not crash the view: instead of
    raising, the raw text is returned behind an explanatory header.
    """
    if isinstance(jsn, bytes):
        jsn = jsn.decode('utf-8')
    try:
        parsed = json.loads(jsn)
    except ValueError:
        return "Invalid JSON. Raw dump is below.\n\n" + jsn
    return json.dumps(parsed, indent=4, sort_keys=True,
                      separators=(',', ': '))
811c1d0a490ff48463abd40800330dafeffd9fcd
689,969
import json


def readfile(filename):
    """Read JSON from *filename* and return the parsed dict.

    Deliberately best-effort: on an I/O failure a message is printed and
    ``None`` is returned instead of raising.
    """
    try:
        with open(filename, 'r') as handle:
            return json.load(handle)
    except IOError:
        print('Error while reading from file')
00fbfe4b302c1ee94000dfae100b8ce1d5a6acd2
689,970
def signed_leb128_encode(value: int) -> bytes:
    """Encode the given number as signed leb128.

    Emits 7 payload bits per byte, least-significant group first; the
    high bit of each byte marks continuation.  Encoding stops once the
    remaining value is pure sign extension of the last payload byte.

    .. doctest::

        >>> from ppci.utils.leb128 import signed_leb128_encode
        >>> signed_leb128_encode(-1337)
        b'\\xc7u'
    """
    out = bytearray()
    more = True
    while more:
        chunk = value & 0x7F
        value >>= 7  # arithmetic shift right: the sign is preserved
        sign = chunk & 0x40  # sign bit of the 7-bit payload
        if (value == 0 and not sign) or (value == -1 and sign):
            # Remaining bits are all sign extension — this is the last byte.
            more = False
            out.append(chunk)
        else:
            out.append(chunk | 0x80)
    return bytes(out)
155eb5e6b9507dacfb7ca0381512f703819b8f15
689,971
import hashlib


def generage_sha1(text: str) -> str:
    """Generate a sha1 hash string.

    (The function name keeps its original, misspelled form for
    backward compatibility with existing callers.)

    Args:
        text (str): Text to generate hash

    Returns:
        str: sha1 hash (hexadecimal)
    """
    return hashlib.sha1(text.encode('utf-8')).hexdigest()
3df62f7607db571b45e82d18e756861c84699513
689,973
def flatten_list(nested_list, list_types=(list, tuple), return_type=list):
    """Flatten `nested_list` into a single flat sequence.

    Traversal is depth-first, left-to-right: every element whose type is
    one of *list_types* is expanded in place; all other elements are kept
    as-is.  The flat result is converted to *return_type*.

    Parameters
    ----------
    nested_list : list | tuple
        The (maybe) nested list to be flatten.
    list_types : tuple[type]
        Types to be regarded as lists. (default is `(list, tuple)`)
    return_type : type
        The returning list type. (default is `list`)
    """
    flat = []
    pending = [nested_list]
    while pending:
        current = pending.pop()
        if isinstance(current, list_types):
            # Push children reversed so they pop back off in order.
            pending.extend(reversed(current))
        else:
            flat.append(current)
    return return_type(flat)
d5198fd51b9dedb0dcdf4c9fdb39f7b10288b898
689,975
def encrypt(msg, a, b, k):
    """Affine-cipher encrypt *msg* letter by letter: l' = a * l + b mod k.

    Letters are taken as offsets from 'a'; spaces pass through unchanged.
    """
    cipher = []
    for ch in msg:
        if ch == " ":
            cipher.append(" ")
        else:
            index = (a * (ord(ch) - ord('a')) + b) % k
            cipher.append(chr(index + ord('a')))
    return "".join(cipher)
73e1610162286704b696cc1a3fc4fd1e21f04e19
689,978
def decimal_to_base(n, base):
    """Convert a decimal integer to its string representation in *base* (2-16).

    Handles negative numbers (prefixed with '-') and zero.

    Parameters
    ----------
    n : int
        The number to convert.
    base : int
        Target base, 2 through 16.

    Returns
    -------
    str
        Digit string using 0-9 and A-F.
    """
    chars = "0123456789ABCDEF"
    if n == 0:
        # Bug fix: the digit loop below never runs for 0, which used to
        # return the empty string instead of "0".
        return "0"
    is_negative = n < 0
    n = abs(n)
    digits = []
    while n > 0:
        digits.append(chars[n % base])
        n //= base
    result = "".join(reversed(digits))
    return "-" + result if is_negative else result
5d94af3d37eaa3d18e3e6f56e0c278668ce118e1
689,980
def split_output(cmd_output):
    """Split command output into lines based on its newline style.

    Tries Windows ('\\r\\n'), classic Mac ('\\r'), then Unix ('\\n')
    endings in that order, stripping outer newlines first.  Output
    containing no newline is returned unchanged (a string, not a list).
    """
    for separator in ('\r\n', '\r', '\n'):
        if separator in cmd_output:
            return cmd_output.strip(separator).split(separator)
    return cmd_output
174ccb65aee8e9225672f2d0d84661624c77c875
689,981
def _parse_bool(value): """Parse a boolean string "True" or "False". Example:: >>> _parse_bool("True") True >>> _parse_bool("False") False >>> _parse_bool("glorp") Traceback (most recent call last): ValueError: Expected 'True' or 'False' but got 'glorp' """ if value == 'True': return True if value == 'False': return False raise ValueError("Expected 'True' or 'False' but got {!r}".format(value))
80a424834b4b2abf338cdc4e4ee6a9028cb460bf
689,987
import struct


def one_byte_array(value):
    """Pack an int into a one-byte bytearray.

    :param value: value 0-255
    """
    packed = struct.pack(">B", value)
    return bytearray(packed)
e49ca6d85bfddb85f9132eb7fbaf3e2f1709bd2e
689,989
def get_column_widths(columns):
    """Get the display width of each column in a list of columns.

    A column's width is the length of its longest element after ``str``
    conversion.  An empty column has width 0 — previously ``max()`` on an
    empty sequence raised ``ValueError``.

    :param columns: list of lists, one inner list per column
    :return: list of ints, one width per column
    """
    return [max((len(str(cell)) for cell in column), default=0)
            for column in columns]
8c6a86f58d214b4270adefeb2e2d942c787fc2c0
689,990
def filter_arglist(args, defaults, bound_argnames):
    """
    Filters a list of function argument nodes (``ast.arg``) and corresponding
    defaults, dropping every argument whose name appears in
    ``bound_argnames``.

    ``defaults`` aligns with the tail of ``args`` (only the last
    ``len(defaults)`` arguments carry defaults); that alignment is
    preserved in the result.

    Returns a pair of new arguments and defaults.
    """
    kept_args = []
    kept_defaults = []
    # Index of the first argument that has a default value.
    first_default = len(args) - len(defaults)
    for index, arg_node in enumerate(args):
        if arg_node.arg in bound_argnames:
            continue
        kept_args.append(arg_node)
        if index >= first_default:
            kept_defaults.append(defaults[index - first_default])
    return kept_args, kept_defaults
d6346cfdc7a8579411223f11fcfc946dc4ac4a10
689,991
import string


def clean_string(s):
    """Function that "cleans" a string for use as an identifier-like token.

    Strips leading and trailing whitespace, substitutes an underscore for
    every remaining whitespace and punctuation character, collapses any
    consecutive underscores down to one, and lower-cases the result.
    Returns the cleaned string.

    Input Parameters:
    -----------------
    s: The string to clean.
    """
    unwanted = string.whitespace + string.punctuation
    table = str.maketrans(unwanted, '_' * len(unwanted))
    cleaned = s.strip().translate(table)
    # Collapse runs of underscores until a fixed point is reached.
    while '__' in cleaned:
        cleaned = cleaned.replace('__', '_')
    return cleaned.lower()
5f8b9e470c40682da16218e7fa075efb296044c3
689,992
def error_dict(error_message: str):
    """Build the standard error-response payload containing *error_message*."""
    return {
        "status": "error",
        "error": error_message,
    }
d13f21b5620f4eeebf59fef03ee493472f5cf3e5
689,995
import zipfile


def listing(zip_path):
    """Get list of all the filepaths in a ZIP.

    Args:
        zip_path: path to the ZIP file

    Returns:
        a list of strings, the ZIP member filepaths

    Raises:
        any file i/o exceptions
    """
    with zipfile.ZipFile(zip_path) as archive:
        return archive.namelist()
702efd93a2ba6cd462678e493eccbea6829cb28f
689,996
import math


def all_possible_combinations_counter(subset_size, set_size):
    """
    Return the number of all possible combinations of elements in size
    of a subset of a set (the binomial coefficient C(set_size, subset_size)).

    Uses ``math.comb`` for an exact integer count instead of the previous
    chain of float factorial divisions, which lost precision (and could
    overflow) for larger sets; the result is still returned as a float to
    preserve the original return type.  A subset larger than the set now
    yields 0.0 rather than raising.

    Parameters
    -------
    subset_size: int
        Size of the subset.
    set_size: int
        Size of the whole set.

    Returns
    -------
    float
        Number of all combinations.
    """
    return float(math.comb(set_size, subset_size))
f16760658ac5dc43096cf824a9a77bad38946f77
690,001
def read_weights(nnf_path):
    """Parse literal weights from an NNF file.

    Format: ``c weights PW_1 NW_1 ... PW_n NW_n``

    :param nnf_path: Path to NNF file
    :return: list of weights (floats), or None when no weights line exists
    """
    with open(nnf_path, "r") as handle:
        for line in handle:
            if "c weights " in line:
                tokens = line[10:].strip().split(" ")
                return [float(token) for token in tokens]
    return None
fa1ba187bb8b2d9055610640f794ee3d9d09554f
690,002
import uuid


def validate_id_is_uuid(input_id, version=4):
    """Validates provided id is uuid4 format value.

    Returns True when provided id is a valid version 4 uuid, otherwise
    returns False.  This validation is to be used only for ids which are
    generated by barbican (e.g. not for keystone project_id).
    """
    try:
        parsed = uuid.UUID(input_id, version=version)
    except Exception:
        # Broad catch is deliberate: any parse failure means "not valid".
        return False
    # Round-trip comparison rejects non-canonical spellings (braces,
    # uppercase, missing dashes) that UUID() would otherwise accept.
    return str(parsed) == input_id
fbc8678ed2c326cce56e905ca9e6f9728f1571c8
690,005
import copy


def with_base_config(base_config, extra_config):
    """Returns the given config dict merged with a base agent conf.

    *base_config* is deep-copied first so the caller's dict (and its
    nested values) is never mutated; keys from *extra_config* override
    the copied base.
    """
    merged = copy.deepcopy(base_config)
    merged.update(extra_config)
    return merged
5101b143d459ea127e1cef14849199236a99006a
690,006
def get_subtext(soup):
    """Gets the subtext links from the given hacker news soup.

    Returns whatever the soup's CSS select of ".subtext" yields.
    """
    return soup.select(".subtext")
f4b34e0a24f47f3332906ba8b69b07593becca04
690,009
def players_in_tournament(t_body):
    """Get number of players in a tournament.

    Args:
        t_body (element.tag) : tourn table body. Child of ResponsiveTable

    Returns:
        number of players (or None if the row query itself returns None)
    """
    rows = t_body.find_all("tr", class_="Table__TR Table__even")
    if rows is None:
        return None
    return len(rows)
5a8918a317b30aaf8523364ef28b7771d60e2fb9
690,011
def PyEval_GetBuiltins(space):
    """Return a dictionary of the builtins in the current execution frame,
    or the interpreter of the thread state if no frame is currently
    executing."""
    # Topmost non-hidden application-level frame, or None when no Python
    # code is currently executing.
    caller = space.getexecutioncontext().gettopframe_nohidden()
    if caller is not None:
        w_globals = caller.get_w_globals()
        w_builtins = space.getitem(w_globals, space.newtext('__builtins__'))
        if not space.isinstance_w(w_builtins, space.w_dict):
            # __builtins__ may be bound to a module object rather than a
            # dict; unwrap it to its attribute dict in that case.
            w_builtins = w_builtins.getdict(space)
    else:
        # No frame: fall back to the interpreter's builtin module dict.
        w_builtins = space.builtin.getdict(space)
    return w_builtins # borrowed ref in all cases
1f2cf2f807c3ed6a73183481bb08452f84c92bdc
690,012
def map_hostname_info(hostname, nmap_store):
    """Map hostname if there is one to the database record.

    Stores ``hostname['name']`` (or None when *hostname* is None) under
    ``nmap_store["hostname"]`` and returns the mutated record.
    """
    nmap_store["hostname"] = hostname.get('name') if hostname is not None else None
    return nmap_store
ecab1c241f1785dbc52f1dbc9ad6a3a8fddf618b
690,013
def sum_ascii_values(text: str) -> int:
    """Sum the ASCII (code point) values of every character in `text`."""
    total = 0
    for character in text:
        total += ord(character)
    return total
7a45396e528c6e2d6c54b611f18d0cf648e418c8
690,015
from typing import MutableMapping
from typing import Any


def get_dict_item_with_dot(data: MutableMapping, name: str) -> Any:
    """Get a dict item using dot notation.

    An empty *name* returns *data* itself.

    >>> get_dict_item_with_dot({'a': {'b': 42}}, 'a')
    {'b': 42}
    >>> get_dict_item_with_dot({'a': {'b': 42}}, 'a.b')
    42
    """
    if not name:
        return data
    node = data
    for part in name.split('.'):
        node = node[part]
    return node
5822837ee608ecb2244ae0205aeeb0e4b92cc194
690,018
def _has_exclude_patterns(name, exclude_patterns): """Checks if a string contains substrings that match patterns to exclude.""" for p in exclude_patterns: if p in name: return True return False
72c0401e1e7073a2ca42e1f99b98e4a1ab834bd3
690,023