content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import torch def batch_project(xyz_tensor, K): """ Project a point cloud into pixels (u,v) given intrinsic K [u';v';w] = [K][x;y;z] u = u' / w; v = v' / w :param the xyz points :param calibration is a torch array composed of [fx, fy, cx, cy] ------- :return u, v grid tensor in image coordinate (tested through inverse project) """ B, _, H, W = xyz_tensor.size() batch_K = K.expand(H, W, B, 4).permute(2,3,0,1) x, y, z = torch.split(xyz_tensor, 1, dim=1) fx, fy, cx, cy = torch.split(batch_K, 1, dim=1) u = fx*x / z + cx v = fy*y / z + cy return torch.cat((u,v), dim=1)
21346c1e4121ac622a97edae8c16428fb74752ad
689,200
def format_sig(name, args, retv): """ Format method signature with Javap's method definition style. Arguments are: name of method, list of argument types, and type of return value. >>> format_sig('getSomeValue', ['int', 'java.lang.String'], 'org.mydomain.myapp.SomeData[]') u'org.mydomain.myapp.SomeData[] getSomeValue(int, java.lang.String)' """ return u'%s %s(%s)' % (retv, name, ', '.join(args))
5ecb5ed3c38dbf14d5000fb9c8129f785c44bbe1
689,205
import collections def GetTurnStats(game_results): """Returns a histogram of game lengths (in rounds played).""" hist = collections.Counter() for game_result in game_results: hist[game_result.num_rounds_played] += 1 return hist
78a4980ffa3a71d0181499f6448c75fa7d56c422
689,213
def _ParseIssueReferences(issue_ref_list): """Parses a list of issue references into a tuple of IDs added/removed. For example: [ "alpha:7", "beta:8", "-gamma:9" ] => ([ "7", "8" ], [ "9" ]) NOTE: We don't support cross-project issue references. Rather we just assume the issue reference is within the same project. """ added = [] removed = [] for proj in issue_ref_list: parts = proj.split(":") proj_id = parts[1] if len(parts) >= 2 else proj[1:] if proj[0] != "-": added.append(proj_id) else: removed.append(proj_id) return added, removed
a8c8ebea8ebd289c84bd34bfdc064b8b90daf830
689,214
import base64 def write_to_data_uri(s): """ Writes to a uri. Use this function to embed javascript into the dash app. Adapted from the suggestion by user 'mccalluc' found here: https://community.plotly.com/t/problem-of-linking-local-javascript-file/6955/2 """ uri = ( ('data:;base64,').encode('utf8') + base64.urlsafe_b64encode(s.encode('utf8')) ).decode("utf-8", "strict") return uri
5b5281119c25c52da1970293e833c18231c0d26e
689,218
def cart2(list1: list, list2: list) -> list: """Cartesian product of two lists. :param list list1: input list 1 :param list list2: input list 2 :return: a new list contains all Cartesian products of the two lists. :rtype: list >>> cart2(['a','b'], [1,2]) [['a',1],['a',2],['b',1], ['b',2]] """ def aux(list1: list, list2: list, accum: list) -> list: if len(list1) == 0 or len(list2) == 0: # base case return accum elif len(list1) == 1: # start to traverse list2 return aux(list1, list2[1:], accum + [[list1[0], list2[0]]]) else: return aux(list1[1:], list2, aux([list1[0]], list2, accum)) return aux(list1, list2, [])
3deb6106b36dc81ed2b8b4251290c687b591c157
689,219
def get_sentences(docs, min_words): """ Given a set of documents we extract all sentences that pass a minimum word threshold ARGS: docs(list of Documents), min_words(int) Returns: sentences(list of Sentences) """ sentences = [] [sentences.extend(doc.get_filtered_sentences(min_words)) for doc in docs] return sentences
0f45ecf212d39daa61038a35352def526bb53c2d
689,221
import re def solution(s): """ Complete the solution so that it splits the string into pairs of two characters. If the string contains an odd number of characters then it should replace the missing second character of the final pair with an underscore ('_'). """ if not len(s) % 2 == 0: s += "_" pattern = r"(?P<search>[a-z_]{2})" matches = re.finditer(pattern, s) pairs = [el.group() for el in matches] return pairs
69c1649ca997a98ffdb382d66c26a2c8ce9eeea1
689,222
def compute_conv_output_dim(ifm_dim, k, stride, total_pad=0, dilation=1): """Returns spatial output dimension size for convolution with given params. total_pad gives the total amount of padding along the entire axis (both sides included). """ if ifm_dim == 1: # indicates dummy dimension, keep as-is out_dim = 1 else: out_dim = int(((ifm_dim + total_pad - dilation * (k - 1) - 1) / stride) + 1) return out_dim
cb401bd2dd6fdce26a5c6b7796edbacdac570237
689,229
import re def find_str(string: str, pattern: str) -> list: """ Find all indices of patterns in a string Parameters ---------- string : str input string pattern : str string pattern to search Returns ------- ind : list list of starting indices """ if not pattern.isalpha(): # if the pattern contains non-alphabetic chars such as * pattern = "\\" + pattern ind = [m.start() for m in re.finditer(pattern, string)] return ind
e0ec90edfb4c7ea55ee58dd184b67b5966c2728e
689,233
from typing import Dict from typing import List def _continuous_columns(feature_types: Dict) -> List[str]: """ Parameters ---------- feature_types : Dict Column name mapping to list of feature types ordered by most to least relevant. Returns ------- List[str] List of columns that have continuous or ordinal in the feature type list. Note ____ if a column has both ordinal/continuous and categorical pick whichever comes first. """ continuous_cols = [] for col in feature_types: for feature_type in feature_types[col]: if feature_type == "continuous" or feature_type == "ordinal": continuous_cols.append(col) break if feature_type == "category": break return continuous_cols
d21b02f949a5c658defeaaa67a0abec916373f0d
689,242
import builtins def any(iterable, pred): """Returns True if ANY element in the given iterable is True for the given pred function""" return builtins.any(pred(x) for x in iterable)
eb90ad4fdd55432705a1c8e1fa5159c9e9e7081e
689,244
def tri_ravel(l, m1, m2): """Ravel indices for the 'stack of triangles' ordering.""" # m1 must be >= m2 if m1 < m2 or m1 > l or m2 > l or m1 < 0 or m2 < 0: raise ValueError("Invalid indices") base = l * (l + 1) * (l + 2) // 6 offset = (l - m1) * (l + 3 + m1) // 2 + m2 ind = base + offset return int(ind)
c7cb59e4c7d1972b5da793ce6dd19cf545c510d7
689,245
def _create_dictionary(sequences): """ Create id/token mappings for sequences. :param sequences: list of token sequences :return: mappings from id to token, and token to id """ tokens = {} for s in sequences: for token in s: tokens[token] = tokens.get(token, 0) + 1 sorted_tokens = sorted(tokens.items(), key=lambda x: -x[1]) # inverse sort id2token = [] token2id = {} for i, (t, _) in enumerate(sorted_tokens): id2token.append(t) token2id[t] = i return id2token, token2id
50bca05f76522f93e5199cbdb009c638ced4890f
689,247
import lzma import json def raw_numpy_to_object(numpy_bytes_array): """Convert numpy array to a Python object. Args: numpy_bytes_array: a numpy array of bytes. Returns: Return a Python object. """ return json.loads( lzma.decompress(numpy_bytes_array.tobytes()).decode('utf-16'), )
99f99495c38a3f8bb47d02ae9b04e37957c39263
689,249
def repeated(pattern, sep, least=1, most=None): """ Returns a pattern that matches a sequence of strings that match ``pattern`` separated by strings that match ``sep``. For example, for matching a sequence of ``'{key}={value}'`` pairs separated by ``'&'``, where key and value contains only lowercase letters:: repeated('[a-z]+=[a-z]+', '&') == '[a-z]+=[a-z]+(?:&[a-z]+=[a-z]+)*' Args: pattern (str): a pattern sep (str): a pattern for the separator (usually just a character/string) least (int, positive): minimum number of strings matching ``pattern``; must be positive most (Optional[int]): maximum number of strings matching ``pattern``; must be greater or equal to ``least`` Returns: a pattern """ if least <= 0: raise ValueError('least should be positive; it is: %d' % least) if most is not None: if most < 2: raise ValueError('it does not make any sense to call this function with most<2:\n' 'for most=1, you could just write the <pattern> argument') if most < least: raise ValueError('most must be greater or equal to least') least_s = str(least - 1) if least > 1 else '' most_s = str(most - 1) if most else '' if most and least == most: if least == 2: return pattern + sep + pattern reps = '{%s}' % least_s else: reps = '{%s,%s}' % (least_s, most_s) if reps == '{,}': reps = '*' elif reps == '{1,}': reps = '+' elif reps == '{,1}': reps = '?' return ('{pattern}(?:{sep}{pattern}){reps}' .format(pattern=pattern, sep=sep, reps=reps))
7eb857c423e06e8cddd6d9e859b872911c7b19b8
689,250
def norm_rows(X, stats=None): """ Normalize the rows of the data. X is an M-by-N array to normalize. It is modified in-place. The data is not returned. If stats is given it must be a sequence of 2 M-length arrays for the min and max statistics to normalize by instead of calculating them from the data itself. Returns the statistics used to normalize. If stats is given, it is just a tuple of the 2 values given. Otherwise it is the values calcuated from the data itself. """ # Note: originally this normalized by the mean and standard deviation first like: # Xmean,Xstd = X.mean(1),X.std(1,ddof=1) # Xstd[Xstd == 0] = 1 # X -= Xmean[:,None] # X /= Xstd[:,None] # However those calculations are actually useless if just re-normalizing. Xmin,Xmax = (X.min(1),X.max(1)) if stats is None else stats X -= Xmin[:,None] D = Xmax - Xmin D[D == 0] = 1 X /= D[:,None] return Xmin,Xmax
9652ba96b164f1b7f9dd0c706cd75c077fd9a695
689,251
def createboard(rows,columns): """ Creates a string given rows and columns desired that can be converted into a matrix through numpy >>> createboard(5,4) '0,0,0,0,0; 0,0,0,0,0; 0,0,0,0,0; 0,0,0,0,0' >>> createboard(3,7) '0,0,0; 0,0,0; 0,0,0; 0,0,0; 0,0,0; 0,0,0; 0,0,0' """ row_size = '' for rows in range(rows): if rows == 0: row_size = row_size + '0' else: row_size = row_size + ',0' fullmatrix = '' for cols in range(columns): if cols == 0: fullmatrix = fullmatrix + row_size else: fullmatrix = fullmatrix + '; ' + row_size return fullmatrix
2658516d95636242d7c8de49cc76f7c0f1217700
689,252
def make_histogram(s): """Make a map from letters to number of times they appear in s. s: string Returns: map from letter to frequency """ hist = {} for x in s: hist[x] = hist.get(x, 0) + 1 return hist
d5b3b5eb58eda71f87dafa9b702cf39366e74560
689,253
import time def profile(func): """profile execution time of provided function""" func_start = time.time() func() func_end = time.time() func_delta = func_end - func_start label = str(func).split()[4].split('.')[2] return f"'{label}' passed in {func_delta:.2f}s"
3e81f4f656c375f341d2d31b3d95a8662bad5dcb
689,257
import types def rebuild_code_object(co, code=None, constants=None, filename=None): """Rebuild the code object.""" code = code or co.co_code constants = tuple(constants or co.co_consts) filename = filename or co.co_filename params = [co.co_argcount, co.co_kwonlyargcount, co.co_nlocals, co.co_stacksize, co.co_flags, code, constants, co.co_names, co.co_varnames, filename, co.co_name, co.co_firstlineno, co.co_lnotab, co.co_freevars, co.co_cellvars] if hasattr(co, "co_posonlyargcount"): # PEP570 added "positional only arguments" in Python 3.8 params.insert(1, co.co_posonlyargcount) return types.CodeType(*params)
98ee5bb2f106790291dfdb0b51745fcc125a703b
689,259
import time def time2int(time_struct, format24=False): """Convert time, passed in as a time.struct_time object, to an integer with hours in the hundreds place and minutes in the units place. Returns 24 hour format if format24 is True, 12 hour format (default) otherwise. """ if not isinstance(time_struct, time.struct_time): return None h = time_struct.tm_hour m = time_struct.tm_min if not format24: h = h if h <= 12 else h - 12 return h*100+m
883b0f7656a354b3a0975b9e15a4637659751080
689,261
def error_response(message): """ Construct error response for API containing given error message. Return a dictionary. """ return {"error": message}
677912817c91725c7079b538bfdf915eb21f3fa0
689,262
def Slope(pt1, pt2): """ Calculates the slope of the line connecting 2 points. :param `pt1`: an instance of `wx.Point`; :param `pt2`: another instance of `wx.Point`. """ y = float(pt2.y - pt1.y) x = float(pt2.x - pt1.x) if x: return y/x else: return None
229bc4e08820e549a1c87f27ddf9262bf0af258d
689,270
def is_sorted(items): """Return a boolean indicating whether given items are in sorted order. DONE: Running time: O(n), because the worst-case scenario would require iterating through the entire array once. DONE: Memory usage: O(1), because the input array is modified in-place (if at all).""" # DONE: Check that all adjacent items are in order, return early if so for i in range(len(items) - 1): if items[i] > items[i+1]: return False return True
da5369728821244d7b81ae4926042d3ff6001fcd
689,271
import mpmath def cdf(x, dfn, dfd): """ Cumulative distribution function of the F distribution. `dfn` and `dfd` are the numerator and denominator degrees of freedom, resp. """ if x <= 0: return mpmath.mp.zero with mpmath.mp.extradps(5): x = mpmath.mp.mpf(x) dfn = mpmath.mp.mpf(dfn) dfd = mpmath.mp.mpf(dfd) dfnx = dfn * x return mpmath.betainc(dfn/2, dfd/2, x2=dfnx/(dfnx + dfd), regularized=True)
89d9d2d9278344e1ff50ac9afde415642bd4e472
689,273
def is_iter(obj): """ Checks if an object behaves iterably. Args: obj (any): Entity to check for iterability. Returns: is_iterable (bool): If `obj` is iterable or not. Notes: Strings are *not* accepted as iterable (although they are actually iterable), since string iterations are usually not what we want to do with a string. """ if isinstance(obj, (str, bytes)): return False try: return iter(obj) and True except TypeError: return False
d3ae5833dc3fb0610cbf8f9446b58148843083d8
689,276
def _make_rows(data, headers, defaults={}): """ extract the values from the data based on the headers data is a list of dicts headers is a list of keys defaults is a dict, where the keys are from the headers list returns the data in a list or rows (tabulate data format) """ table_data = [] for o in data: table_data.append([o.get(k, defaults.get(k)) for k in headers]) return table_data
e67f699faf134ccac79b17b807c917f2f363342c
689,277
def sampling(array, interval=1, offset=0): """ Down-sample the input signal with certain interval. input: array: numpy array. The input temporal signal. 1d or with multiple dimensions interval: int. The interval to sample EEG signal. Default is 1, which means NO down-sampling is applied offset: int. Sampling starts from "offset-th" data point return: sampled_array: numpy array. Down-sampled signal """ if len(array.shape) < 2: return array[offset::interval] else: return array[:, :, offset::interval]
440df08b95619f446ee498022d509740e2d75637
689,278
def acquire_page_list_urls_books(soup): """ Take a BeautifulSoup content of a category page. Return a list of the urls of the books in the first page for a unique category. """ page_list_partial_urls_books = map( lambda x: x.a['href'][8:], soup.find_all('h3'), ) page_list_urls_books = map( lambda x: f"http://books.toscrape.com/catalogue{x}", page_list_partial_urls_books, ) return list(page_list_urls_books)
d6ac09b85ce0ca3bef2056615efca413f377f1e0
689,281
def _convert_to_dict(caps_str: str) -> dict: """ Parses the VCP capabilities string to a dictionary. Non continuous capabilities will include an array of all supported values. Returns: Dict with all capabilities in hex Example: Expected string "04 14(05 06) 16" is converted to:: { 0x04: [], 0x14: [0x05, 0x06], 0x16: [], } """ if len(caps_str) == 0: # Sometimes the keys arent found and the extracting of # capabilities returns an empty string. return {} result_dict = {} group = None prev_digit = None for chunk in caps_str.replace("(", " ( ").replace(")", " ) ").split(" "): if chunk == "": continue elif chunk == "(": group = prev_digit elif chunk == ")": group = None else: val = int(chunk, 16) if group is None: result_dict[val] = [] else: result_dict[group].append(val) prev_digit = val return result_dict
baedf62443eeac9bf4baa5a52acb0ec2dcb5a63e
689,283
import torch def pck(x, x_gt, perm_mat, dist_threshs, ns): """ Percentage of Correct Keypoints evaluation metric. :param x: candidate coordinates :param x_gt: ground truth coordinates :param perm_mat: permutation matrix or doubly stochastic matrix indicating correspondence :param dist_threshs: a iterable list of thresholds in pixel :param ns: number of exact pairs. :return: pck, matched num of pairs, total num of pairs """ device = x.device batch_num = x.shape[0] thresh_num = dist_threshs.shape[1] indices = torch.argmax(perm_mat, dim=-1) dist = torch.zeros(batch_num, x_gt.shape[1], device=device) for b in range(batch_num): x_correspond = x[b, indices[b], :] dist[b, 0:ns[b]] = torch.norm(x_correspond - x_gt[b], p=2, dim=-1)[0:ns[b]] match_num = torch.zeros(thresh_num, device=device) total_num = torch.zeros(thresh_num, device=device) for b in range(batch_num): for idx in range(thresh_num): matches = (dist[b] < dist_threshs[b, idx])[0:ns[b]] match_num[idx] += torch.sum(matches).to(match_num.dtype) total_num[idx] += ns[b].to(total_num.dtype) return match_num / total_num, match_num, total_num
75dbd6e34772f9a863fb345d6042aceb68829a12
689,284
def get_line_end( is_last_line: bool, ) -> str: """ Get the characters needed for the end of a row of a TeX table being created by one of the functions below. Note that for some TeX-related reason, we can't put the \\ on the last line. Args: is_last_line: Whether this is the last line of the Returns: The characters needed for the end of the line of the table """ return "" if is_last_line else " \\\\ \n\hline"
501cafe42f9b4aede81678286e4bd6cf3d9525aa
689,286
def change_log_level_console(logger, new_level): """Change the minimum level that will be logged to the console Parameters ---------- logger : logger object As created by "init_root_logger" method new_level : int New minimum level to log to console """ logger.handlers[1].setLevel(new_level) return logger
e52973b71a1ea615dc5c45094b562d15273a63e8
689,288
def writeLabels(label): """Format text to be output by LaTeX.""" return label.replace('_', r'\_')
e7ddf5af2508a65729a018d787356cbd0cc85a75
689,290
def get_public_translation(instance, language_slug): """ This tag returns the most recent public translation of the requested content object in the requested language. :param instance: The content object instance :type instance: ~integreat_cms.cms.models.pages.page.Page, ~integreat_cms.cms.models.events.event.Event or ~integreat_cms.cms.models.pois.poi.POI :param language_slug: The slug of the requested language :type language_slug: str :return: The translation object of the requested instance :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation, ~integreat_cms.cms.models.events.event_translation.EventTranslation, or ~integreat_cms.cms.models.pois.poi_translation.POITranslation """ return instance.get_public_translation(language_slug)
f70d3dc2ede333e34f92b4010d1bf33d9032b5d2
689,291
def ext_checker(fname, ext): """Replaces the extension of fname with ext, or adds ext to fname if fname doesn't already have an extension.""" ext_begin = -len(fname) for ind in range(1, len(fname) + 1): if fname[-ind] == ".": ext_begin = ind break return fname[:-ext_begin] + "." + ext
e68f75c06ae21594cea220bba4f61288aeae4893
689,294
def asa_scp_handler(ssh_conn, cmd='ssh scopy enable', mode='enable'): """Enable/disable SCP on Cisco ASA.""" if mode == 'disable': cmd = 'no ' + cmd return ssh_conn.send_config_set([cmd])
2f8ac7e7df4329c90043a4c0e22d11f6d34444dd
689,299
def progress(status_code): """Translate PROGRESS status codes from GnuPG to messages.""" lookup = { 'pk_dsa': 'DSA key generation', 'pk_elg': 'Elgamal key generation', 'primegen': 'Prime generation', 'need_entropy': 'Waiting for new entropy in the RNG', 'tick': 'Generic tick without any special meaning - still working.', 'starting_agent': 'A gpg-agent was started.', 'learncard': 'gpg-agent or gpgsm is learning the smartcard data.', 'card_busy': 'A smartcard is still working.' } for key, value in lookup.items(): if str(status_code) == key: return value
7dc7010175ff268b0af8db35e02b62a8790683b9
689,301
def per_target(imgs): """Arrange samples per target. Args: imgs (list): List of (_, target) tuples. Returns: dict: key (target), value (list of data with this target) """ res = {} for index in range(len(imgs)): _, target = imgs[index] if target not in res: res[target] = [] res[target].append(index) return res
27a708a30490dab972f97f9e5fb831741de9d099
689,305
from datetime import datetime import time def uptime(since): """Turn an date and time into an uptime value. The returned value is a number of seconds from the provided value to the current time. The date/time provided is expected to be a local time using the following format: 2017-01-10 16:32:21. """ fr = datetime(*time.strptime(since, "%Y-%m-%d %H:%M:%S")[:6]) to = datetime.now() delta = to - fr delta = int(delta.total_seconds()) if delta < 0: return 0 return delta
b9dddfb3b99d9dafb32d9cdc3ee4e8710bde4f0e
689,306
def interpret_word_seg_results(char_seq, label_seq): """Transform model output into user-friendly contents. Example: In CWS, convert <BMES> labeling into segmented text. :param char_seq: list of string, :param label_seq: list of string, the same length as char_seq Each entry is one of ('B', 'M', 'E', 'S'). :return output: list of words """ words = [] word = "" for char, label in zip(char_seq, label_seq): if label[0] == "B": if word != "": words.append(word) word = char elif label[0] == "M": word += char elif label[0] == "E": word += char words.append(word) word = "" elif label[0] == "S": if word != "": words.append(word) word = "" words.append(char) else: raise ValueError("invalid label {}".format(label[0])) return words
9c83a19360b9498bb57df80cc257b0a5333593c0
689,307
import importlib def try_import(path, keys=None, _as=True): """Try to import from a module. Parameters ---------- path : str Path to module or variable in a module keys : str or list[str], optional Keys to load from the module _as : bool, defualt=True If False, perform recursive assignement as in >> # equivalent to: `import pack.sub.mod` >> pack = try_import('pack.sub.mod', _as=False) Else, it will look like a renamed import: >> # equivalent to: `import pack.sub.mod as my_mod` >> my_mod = try_import('pack.sub.mod', _as=True) Returns ------- loaded_stuff : module or object or tuple A tuple is returned if `keys` is a list. Return None if import fails. """ # check if the base package exists pack = path.split('.')[0] try: __import__(pack) except ImportError: if keys is None or isinstance(keys, str): return None else: keys = list(keys) return [None]*len(keys) if _as: # import a module module = importlib.import_module(path) # optional: extract attributes if keys is not None: if isinstance(keys, str): return getattr(module, keys) else: return tuple(getattr(module, key) for key in keys) return module else: # recursive import path = path.split('.') mod0 = importlib.import_module(path[0]) cursor = mod0 for i in range(1, len(path)): mod1 = importlib.import_module('.'.join(path[:i+1])) setattr(cursor, path[i], mod1) cursor = getattr(cursor, path[i]) return mod0
10ad9c39b4d8813d0f056717f05365a28b54ab21
689,316
def normalize_path_for_settings(path, escape_drive_sep=False): """ Normalize a path for a settings file in case backslashes are treated as escape characters :param path: The path to process (string or pathlib.Path) :param escape_drive_sep: Option to escape any ':' driver separator (windows) :return: The normalized path """ if isinstance(path, str): processed = path else: processed = str(path.resolve()) processed = processed.replace('\\', '/') if escape_drive_sep: processed = processed.replace(':', '\\:') return processed
d48d1bb5dc6ddddeb03dfc3c938451d5d0c05e48
689,317
def round_robin(w) -> bool: """Implements a round-robin association strategy where iots are associated their ssid modulo number of APs. Return: Returns true on successful association. False otherwise. """ m = len(w.aps) i = 0 for device in w.iots: if device.do_associate(w.aps[i%m]) == False: return False i += 1 return True
03cb6ea5eac30ff2cad676d9e55388403cd955df
689,318
def check_dict_is_contained_in_another(filter_data: dict, data: dict) -> bool: """ Check if a dict is contained in another one. * Works with nested dicts by using _dot notation_ in the filter_data keys so a filter with `{"base_key.sub_key": "value"}` will look for dicts containing `{"base_key": {"sub_key": "value"}}` """ for key, value in filter_data.items(): if key in data: if data[key] != value: return False continue if "." in key: base_key, sub_key = key.split(".", maxsplit=1) if base_key not in data: return False base_value = data[base_key] if not isinstance(base_value, dict): return False if not check_dict_is_contained_in_another({sub_key: value}, base_value): return False continue return False return True
87eefc13b09edeb21e0a6097a09c12ea8d650545
689,323
def inside_bounding_box(bb_minlat, bb_minlon, bb_maxlat, bb_maxlon, start_lat, start_lon, end_lat, end_lon): """ Check if two given sets of coordinates (start_lat, start_lon) and (end_lat, end_lon) are within a bounding box (bb_minlat, bb_minlon, bb_maxlat, bb_maxlon) Examples: >>> inside_bounding_box(50.7777, 4.2359, 50.9204, 4.5216, 50.866232, 4.327700, 50.896571, 4.428547) True """ return (bb_minlat <= start_lat <= bb_maxlat and bb_minlon <= start_lon <= bb_maxlon) and \ (bb_minlat <= end_lat <= bb_maxlat and bb_minlon <= end_lon <= bb_maxlon)
817c26ecc4376d3472298408157d39465e9dec80
689,325
import typing def get_destination_for_notebook(relative_path: str) -> typing.Any: """ Get the destination for a notebook. The destination can be of a type of your choice, but it must be serializable with itsdangerous. It will be used by the other methods defining local behavior to determine where to store the notebook and how to access the notebook via your JupyterHub instance. :param relative_path: the relative path, based on the template path :return: the destination where the notebook should be saved """ return { 'relative': relative_path }
62b248e6ce0cccff3b02ee83b301d527c259048d
689,328
def _update_change_dec(func): """ Decorator to track property changes in class. Apply @property.setter after @_update_change_dec :param func: function object to decorate :return: decorated function """ def _wrapper(self, val): self._changed = True func(self, val) return _wrapper
e37ab2ebb787af2d8f04284ef40c1fcde327dee3
689,329
def nice_pypy_mem(mem: str) -> str: """Improves formatting of the memory statistics produced by pypy's garbage collector (which are provided as strings)""" return mem.replace("KB", " KiB").replace("MB", " MiB").replace("GB", " GiB")
3c6e30cba768a8f0d3932be133ba12428c9935af
689,333
def swap_short_telos_group_number(row): """ swaps all group 1 patients to group 2 & all group 2 patients to group 1 this is done purely for visualization reasons and has no impact whatsoever on interpretation of results; just swapping #s for interpretability """ if row == 1: row = 2 elif row == 2: row =1 return row
e2f7c99d8f31f9a130b67be72c285c7acb50364c
689,334
def calculate_board_dim(img_mat, start_pos): """ Calculates the length of one side of the board (all info we need to find specific tiles) """ x, y = start_pos dim = 0 # size of one side of the chess board while img_mat[x, y, 0] != 0 and img_mat[x, y, 1] != 0 and img_mat[x, y, 0] != 0: dim += 1 x += 1 # move one pixel to the right until we hit black one return dim
82b194167232f94a3ca61c7630d94ef37eca4c5e
689,336
def add_fdcuk_meta_data(df, filepath): """ Accepts a dataframe and a filepath parses the filepath to get strings representing nation, league, and season writes this data to the dataframe Returns the changed dataframe """ season = filepath.stem str_filepath = str(filepath) str_filepath_parts = str_filepath.split('/') nation = str_filepath_parts[11] league = str_filepath_parts[12] df['nation'] = nation df['league'] = league df['season'] = season return df
a0bfafc3f0d127fa944e71988bf8e864d712da33
689,343
def can_link_to(person, contact, model): """ Determines whether or not the specified person can form a social network link to the specified contact. :param person: The person wanting to link. :param contact: The contact to determine link eligibility. :param model: The model to use. """ if model.require_mutual and \ len(contact.contacts) >= contact.max_contacts and \ not person.unique_id in contact.contacts: return False return True
8250b3d2ff7fb5be2cb47e83caa99c827517ea79
689,344
def should_show_entry_state(entry, current_api_id): """Returns wether or not entry state should be shown. :param entry: Contentful entry. :param current_api_id: Current API selected. :return: True/False """ return ( current_api_id == 'cpa' and ( entry.__dict__.get('draft', False) or entry.__dict__.get('pending_changes', False) ) )
30856978bb38b598354e67ed8cf9a0568af448db
689,347
def parse_args_and_kwargs(cmdline): """ cmdline: list returns tuple of: args (list), kwargs (dict) """ # Parse args and kwargs args = [] kwargs = {} if len(cmdline) > 1: for item in cmdline[1:]: if '=' in item: (key, value) = item.split('=', 1) kwargs[key] = value else: args.append(item) return (args, kwargs)
9f57d3ea9ad5780d0319e37c81c75228de504ddc
689,354
def isodate(dt): """Takes a date, returns ISO8601 date format""" return dt.strftime('%Y-%m-%d')
9f29806f8473db762bbbdb7bb99da56ba527fd4e
689,355
def _cell_index(dataframe, template='phderi'): """Return cell index (column, row) of first value on pivot. Parameters ---------- dataframe : DataFrame Raw dataframe imported from excel template : str, optional Template, by default 'phderi' Returns ------- list Return [column index, row index] Raises ------ Exception Not match with template. """ template_check = {'phderi': ['Jan', 'Feb'], 'pdderi': ['JAN', 'FEB']} target, check = template_check[template] cell_index = [] for column in dataframe: target_status = dataframe[column].astype(str).str.contains( '^' + target + '$' ).sum() if target_status: cell_index.append(column) break column = cell_index[0] row_target = dataframe[column] == target cell_index.append(dataframe[column] [row_target].index.values.astype(int)[0]) column, row = cell_index[0], cell_index[1] if dataframe.iloc[row, column + 1] == check: return cell_index else: raise Exception( 'Template tidak sesuai dengan {template}'.format( template=template) )
3d290b0f5ff12077d903f979ce9c9b42de37f44b
689,357
def splitIntoTetrahedra(poly = "const PolyClipperPolyhedron&", tol = ("const double", "0.0")): """Split a PolyClipper::Polyhedron into tetrahedra. The result is returned as a vector<vector<int>>, where each inner vector is a set of four ints representing vertex indices in the input Polyhedron.""" return "std::vector<std::vector<int>>"
1399e39cf54b9da0b597d9e0cf06728b467c4159
689,365
def add_members_to_policy(role: str, iam_policy: list, members: list, command_name: str) -> list: """ Append members to policy role members. Args: role (str): The name of policy role. iam_policy (list): IAM policies. members (list): Members to append to policy. command_name (str): An alternative command that will be displayed to the user in case of an error. Returns: list: Updated policies. """ role_found = False for policy in iam_policy: if policy.get('role') == role: policy['members'].extend(members) role_found = True break if not role_found: raise Exception('The provided role is not part of the project IAM policies.' f'If you wish to add a new policy, consider using the {command_name} command.') return iam_policy
c8faa020b8349f0ad9a8f41e48b8ee26cc8db0ec
689,368
def _is_decoy_suffix(pg, suffix='_DECOY'): """Determine if a protein group should be considered decoy. This function checks that all protein names in a group end with `suffix`. You may need to provide your own function for correct filtering and FDR estimation. Parameters ---------- pg : dict A protein group dict produced by the :py:class:`ProtXML` parser. suffix : str, optional A suffix used to mark decoy proteins. Default is `'_DECOY'`. Returns ------- out : bool """ return all(p['protein_name'].endswith(suffix) for p in pg['protein'])
77da0255f7fe9672f502817bd5ab07f0bd6e3159
689,370
import mpmath
def calc(algorithm: str, accuracy: int) -> mpmath.mpf:
    """Return Pi computed by the named algorithm.

    Dispatches via this module's globals, so a module (or object) named
    ``algorithm`` exposing a ``pi(accuracy)`` callable must already be
    imported into this module; otherwise a KeyError is raised.

    Args:
        algorithm (str): algorithm by which pi is calcurated
        accuracy (int): accuracy

    Returns:
        mpmath.mpf: Pi value
    """
    # globals()[algorithm] looks up e.g. a sibling algorithm module by name.
    pi: mpmath.mpf = globals()[algorithm].pi(accuracy)  # pylint: disable=invalid-name
    return pi
c94138cec64556702acf82a2573888e4b5340795
689,373
def get_grant_name(grant):
    """Return a display name for an ACL grant entry.

    Group grantees are named by the final path segment of their URI;
    CanonicalUser grantees by their DisplayName. Any other grantee
    type yields the empty string.
    """
    grantee = grant["Grantee"]
    grantee_type = grantee["Type"]
    if grantee_type == "Group":
        return grantee["URI"].rsplit("/", 1)[-1]
    if grantee_type == "CanonicalUser":
        return grantee["DisplayName"]
    return ""
ec97f7edaada27d3a2670dbcadc1191f4371626b
689,374
def validate_params(params):
    """
    Validate and normalize the parameters passed to KBParallel.run_batch.

    Also refer to the type def for run_batch in KBParallel.spec

    Args:
        params: dict of run_batch parameters; mutated in place with defaults.

    Returns:
        The same dict, with defaults filled in and limits clamped.

    Raises:
        ValueError: if 'tasks' or 'runner' is missing, or the runner is unknown.
    """
    if 'tasks' not in params or not params['tasks']:
        raise ValueError('"tasks" field with a list of tasks is required.')
    if 'runner' not in params:
        raise ValueError('"runner" field is required')
    if params['runner'] not in ['local_serial', 'local_parallel', 'parallel']:
        raise ValueError('Unknown or unsupported runner type: ' + str(params['runner']))
    # Defaults for the concurrent task limits depend on the runner type.
    params.setdefault('concurrent_local_tasks',
                      2 if params['runner'] == 'local_parallel' else 1)
    params.setdefault('concurrent_njsw_tasks',
                      3 if params['runner'] == 'parallel' else 0)
    # Clamp concurrent_local_tasks to [0, 20] and concurrent_njsw_tasks to [0, 50].
    params['concurrent_local_tasks'] = max(0, min(params['concurrent_local_tasks'], 20))
    params['concurrent_njsw_tasks'] = max(0, min(params['concurrent_njsw_tasks'], 50))
    # Default max_retries to 1, then clamp to [1, 5]. Bug fix: an explicit
    # None used to survive (setdefault is a no-op on an existing None key);
    # treat None the same as unset.
    if params.get('max_retries') is None:
        params['max_retries'] = 1
    params['max_retries'] = max(1, min(params['max_retries'], 5))
    return params
d08fb83588e75892c3fcfef31eb6882f8bdf0f1b
689,376
def check_for_negatives(terms):
    """
    Report whether any expression in `terms` is negative, judged by its
    string form starting with a minus sign.

    Parameters
    ----------
    terms : list of sympy expressions
        A list where expressions may be either positive or negative.

    Returns
    -------
    bool
        `True` if any term stringifies with a leading '-', else `False`.
    """
    return any(str(term)[0] == '-' for term in terms)
9c2ce07ad46f531d094e7da97bba7b2f41197444
689,377
import re
def filt(seq, lst):
    """ filters lst. returns sublist

    Builds a single alternation regex from the patterns in `seq` and keeps
    every element of `lst` that matches at least one of them.

    Args:
        seq: list of regex patterns used to build the combined matcher
        lst: list of strings to filter

    Returns:
        slst: list
            elements of lst that match at least one element of seq
    """
    combined = "(" + ")|(".join(seq) + ")"
    matcher = re.compile(combined)
    return [item for item in lst if matcher.search(item)]

# still need a checkUsername function
06d6403f22a37089e3aad7fa42049d1ee97eaabc
689,381
import re
def get_task_name_without_suffix(task_name, variant_name):
    """Return evergreen task name without suffix added to the generated task.

    Remove evergreen variant name, numerical suffix and underscores between
    them from evergreen task name.
    Example: "noPassthrough_0_enterprise-rhel-80-64-bit-dynamic-required" -> "noPassthrough"

    Args:
        task_name: task name, possibly None/empty (treated as "").
        variant_name: build variant name to strip from the end.

    Returns:
        The task name with any "_<digits>" and "_<variant>" suffixes removed.
    """
    task_name = task_name if task_name else ""
    # Fix: escape the variant name before interpolating it into the pattern —
    # variant names containing regex metacharacters (e.g. '.', '+') previously
    # corrupted the pattern and could fail to strip the suffix.
    return re.sub(fr"(_[0-9]+)?(_{re.escape(variant_name)})?$", "", task_name)
f4ddda7d8f7d778f15f9f7d81751c0fad4fba444
689,382
def get_decision_sequence(size):
    """
    Get the decision sequence for generating valid cycles with DGMG for
    teacher forcing optimization.

    Encoding: 0 = "add node"/"add edge"/destination index, 1 = "stop".
    Every node after the first gets an edge to its predecessor; the last
    node additionally gets a closing edge back to the root.
    """
    sequence = []
    for node_idx in range(size):
        sequence.append(0)  # add node
        if node_idx != 0:
            # edge to the previous node
            sequence += [0, node_idx - 1]
            if node_idx == size - 1:
                # closing edge back to the root completes the cycle
                sequence += [0, 0]
        sequence.append(1)  # stop adding edges for this node
    sequence.append(1)  # stop adding nodes
    return sequence
32d70e0161ac141d269f0ec5463ad0496fced23f
689,383
def drop_last_month(df):
    """Drop rows belonging to the most recent month in the data.

    The last month might have incomplete data, so it is removed; the first
    month (Jan 2012 in the original dataset) is assumed complete.
    """
    month_period = df.transaction_date.dt.to_period('M')
    latest = month_period.max()
    return df[month_period < latest]
f5a5be8ac8dd5d8b2fe4c40f8d1e99c645699927
689,391
import re
def parse_list_str(setting_str):
    """
    Split a comma-separated string like 'foo, bar' into ['foo', 'bar'].

    Also handles 'irregular' spacing like "foo ,bar", and strips leading and
    trailing whitespace around the whole string (" foo, bar " -> ['foo', 'bar']).

    Returns:
        list of str: the comma-separated items.
    """
    # Fixes: raw string avoids the invalid '\s' escape (DeprecationWarning,
    # SyntaxError in future Python); .strip() keeps outer whitespace from
    # leaking into the first/last items.
    return re.split(r'\s*,\s*', setting_str.strip())
7c264bf945114d166776f20fe60877d5e6bca1e6
689,392
def _generate_reset_key(user): """Return a reset key for the given User factory stub object.""" return "{0}_reset_key".format(user.name).lower()
8701d398cf530d399fffa498855e69802b373418
689,399
def getFactoryMeritMultiplier(factoryId):
    """
    Returns the skill merit multiplier for a particular factory.
    factoryId is the factory-interior zone defined in ToontownGlobals.py.
    """
    # Doubled from the historical value after cog levels were lowered and
    # players needed more runs. Currently the same for every factory.
    multiplier = 4.0
    return multiplier
2340ff5b8a8ece16b1fece2dfa02d725acc97a13
689,400
from typing import Optional import math def _seconds_to_frame_index( time_in_seconds: float, fps: int, zero_indexed: Optional[bool] = True ) -> int: """ Converts a point in time (in seconds) within a video clip to its closest frame indexed (rounding down), based on a specified frame rate. Args: time_in_seconds (float): The point in time within the video. fps (int): The frame rate (frames per second) of the video. zero_indexed (Optional[bool]): Whether the returned frame should be zero-indexed (if True) or one-indexed (if False). Returns: (int) The index of the nearest frame (rounding down to the nearest integer). """ frame_idx = math.floor(time_in_seconds * fps) if not zero_indexed: frame_idx += 1 return frame_idx
63b5e3e6631fd6d5e58fb0eef12721df37163e7f
689,401
def cost_part1(crabs: list[int], target: int) -> int:
    """
    Total fuel for all crabs to move to `target`, under the part-1 model
    where each step costs one unit of fuel (cost = |distance|).
    """
    total = 0
    for position in crabs:
        total += abs(position - target)
    return total
4df995bdd0dfe90a120186b91eae4a4ff94765ec
689,403
def parse_n_features(n_features, total):
    """Resolve an `n_features` parameter to a concrete feature count.

    An int must lie on the open interval (0, `total`) and is returned as-is.
    A float must lie on (0, 1) and is interpreted as a fraction of `total`.

    Args:
        n_features : int or float
            An `n_features` parameter passed to forward or backward selection.
        total : int
            The total features in the data.

    Returns:
        int: number of features to select.

    Raises:
        ValueError: if the value is outside its valid open interval.
    """
    if isinstance(n_features, int) and not 0 < n_features < total:
        raise ValueError(
            "If an int, `n_features` must be on (0, {}).".format(
                total
            )
        )
    if isinstance(n_features, float):
        if not 0 < n_features < 1:
            raise ValueError(
                "If a float, `n_features` must be on (0, 1)."
            )
        return int(n_features * total)
    return n_features
e4c3f27600b088e77543fbd5bad73832a6e1d4b1
689,404
def where(self, test):
    """
    Create a new :class:`.Table` containing only the rows that pass a test.

    :param test: A function that takes a :class:`.Row` and returns
        :code:`True` if it should be included in the new :class:`.Table`.
    :type test: :class:`function`
    :returns: A new :class:`.Table`.
    """
    # Fast path: no row names to keep in sync.
    if self._row_names is None:
        kept_rows = [row for row in self._rows if test(row)]
        return self._fork(kept_rows, row_names=None)

    # Keep rows and their names aligned while filtering.
    kept_rows = []
    kept_names = []
    for i, row in enumerate(self._rows):
        if test(row):
            kept_rows.append(row)
            kept_names.append(self._row_names[i])
    return self._fork(kept_rows, row_names=kept_names)
ef7a8761285b5679a69e9ca87ec6bc23e98c3f60
689,405
def fields_to_batches(d, keys_to_ignore=()):
    """
    Convert a dict of batched tensors/sequences into a list of per-entry dicts.

    The input is a dict whose items are batched tensors. The output is a list of
    dictionaries - one per entry in the batch - with the slices of the tensors
    for that entry.

    Args:
        d: dict mapping field names to equal-length batched sequences.
        keys_to_ignore: iterable of keys to exclude from the output. Default
            changed from a mutable `[]` to an immutable `()` to avoid the
            shared-mutable-default pitfall; callers may still pass a list.

    Returns:
        list of dicts, one per batch entry.

    Raises:
        ValueError: if the (non-ignored) fields have different lengths.

    Example:
        Input: d = {"a": [[1, 2], [3, 4]], "b": [1, 2]}
        Output: [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}]
    """
    keys = [key for key in d.keys() if key not in keys_to_ignore]
    # All fields must have the same batch length, or slicing is ill-defined.
    lengths = {k: len(d[k]) for k in keys}
    if len(set(lengths.values())) != 1:
        msg = f"fields have different lengths: {lengths}."
        # If there's a doc key, add it to specify where the error is.
        if "doc_key" in d:
            msg = f"For document {d['doc_key']}, " + msg
        raise ValueError(msg)
    length = next(iter(lengths.values()))
    return [{k: d[k][i] for k in keys} for i in range(length)]
6896adacfd34754bd49d8b53aff5bc9b1222677c
689,406
def my_import(name):
    """Dynamically import a dotted module path and return the leaf module.

    Converts a string such as ``'package.sub.module'`` into the corresponding
    module object — useful for importing modules named in configuration.

    Parameters
    ----------
    :str: `str`
        A string name of the module to import

    Returns
    -------
    :obj: `module`
        A module object converted from the string.
    """
    parts = name.split('.')
    # __import__ returns the top-level package; walk down to the leaf.
    module = __import__(parts[0])
    for part in parts[1:]:
        module = getattr(module, part)
    return module
689a38f20aa402db0625c5f5384cb0b88dfe98a2
689,407
def gen_Message(message):
    """Create a new Message."""
    return {
        "@type": "Message",
        "MessageString": message,
    }
13ed53733d3910df7e0bdc3eb5fc3d40a1275390
689,412
def seconds_to_datetime(second: int) -> str:
    """Convert a second count to an 'HH:MM:SS' string."""
    total = int(second)
    hours = total // 3600
    mins = (total % 3600) // 60
    secs = total % 60
    return f'{hours:02d}:{mins:02d}:{secs:02d}'
2d300a71bdbfecb3b3620c8a7b77e778957f89e3
689,413
def safe_int_cast(val, default=0):
    """Safely casts a value to an int, falling back to `default` on failure."""
    try:
        result = int(val)
    except (ValueError, TypeError):
        result = default
    return result
0282539ec33efd8264d7fe2fb09dfb4668f0c942
689,415
def _get_regular_HD_name(name): """Convert an HD name in *Henry Draper Catalogue* to its regular form `"HD NNNN"` or `"HD NNNN C"`. Args: name (str or int): Name or HD number of a star (e.g. `"HD8276"`, `"HD 8276A"`, `8443`). Returns: str: Regular HD name `"HD NNNN"` or `"HD NNNN C"`. See also: * `Henry Draper Catalogue and Extension (III/135A) <http://vizier.u-strasbg.fr/cgi-bin/VizieR?-source=III/135A/catalog>`_ """ if isinstance(name, str): name = name.strip() if name[-1].isalpha(): comp = name[-1] return 'HD %d %s'%(int(name[2:-1]), comp) else: return 'HD %d'%(int(name[2:])) elif isinstance(name, int): return 'HD %d'%name else: raise ValueError
f69f14d1d0e3d1632adb7dc059ac67e5c6e9a1a4
689,416
def or_of_bits(*bits):
    """OR the given bits.

    Args:
        *bits (int): Bits for OR. More than one argument required.

    Return:
        or_bit (int): OR of the given bits.

    Example:
        >>> or_of_bits(1, 4, 16)
        21 # 0b10101, 0x15
        >>> or_of_bits(0b00010, 0b10000)
        18 # 0b10010, 0x12
        >>> or_of_bits(0x01, 0x10)
        17 # 0b10001, 0x11
    """
    assert len(bits) > 1, "more than one argument required"
    assert all(isinstance(bit, int) for bit in bits), \
        "bits: all elements are expected to be 'int'"
    result = 0
    for value in bits:
        result = result | value
    return result
621b71ee4828912da027c63d61b7de669f7029e4
689,417
import yaml
def parse_yaml(config):
    """Safely loads yaml data

    Uses ``yaml.safe_load``, so tags that would instantiate arbitrary
    Python objects are rejected (unlike ``yaml.load``).

    Args:
        config: YAML configuration file (an open stream or a string, as
            accepted by ``yaml.safe_load``)

    Returns:
        Dictionary representation of given YAML (or whatever top-level
        structure the document defines)
    """
    return yaml.safe_load(config)
17c719e994df6208e6b53913bb8fe19767b3663e
689,418
import torch
def apply_distance_bound(data_region_i, data_region_i_orig, args):
    """
    Clamp perturbed points back onto an L2 ball around their originals.

    Any point farther than ``args.dist_threshold`` from its original position
    is projected back onto the sphere of that radius.

    Input:
        data_region_i: (S,3) tensor, current region i points (modified in place)
        data_region_i_orig: (S,3) tensor, original region i points
        args: options object; only ``args.dist_threshold`` (float) is read
    Return:
        data_region_i: modified data_region_i
        count: number of points that exceeded the distance bound
    """
    with torch.no_grad():
        region_i_diff = data_region_i - data_region_i_orig          # (S,3)
        region_i_dist = torch.norm(region_i_diff, dim=1)            # (S,)
        # Vectorized replacement for the old per-point Python loop:
        # one boolean mask selects every out-of-bound point at once.
        mask = region_i_dist > args.dist_threshold                  # (S,) bool
        count = int(mask.sum().item())
        if count:
            data_region_i[mask] = (
                data_region_i_orig[mask]
                + args.dist_threshold
                * region_i_diff[mask] / region_i_dist[mask].unsqueeze(1)
            )
    return data_region_i, count
0cf40bfbc83ad3c2104c969bb5faaace6b88d5db
689,419
def attach_disk(fco_api, server_uuid, disk_uuid, index):
    """
    Attach a disk to a server through the FCO API.

    :param fco_api: FCO API object
    :param server_uuid: Server UUID
    :param disk_uuid: Disk UUID
    :param index: Disk index on the server
    :return: Job-compatible object
    """
    return fco_api.attachDisk(
        serverUUID=server_uuid,
        diskUUID=disk_uuid,
        index=index,
    )
ab32f750effef35cae5d5932e941d99f4ce5888e
689,420
def median_zero_normalization(data, normalize='samples'):
    """
    Center the data by subtracting the median of each sample (row) or
    feature (column).

    :param data: pandas DataFrame.
    :param str normalize: whether the normalization should be done by
        'features' (columns) or 'samples' (rows); None behaves like 'samples'.
    :return: Pandas dataframe.

    Example::

        data = pd.DataFrame({'a': [2,5,4,3,3], 'b':[4,4,6,5,3], 'c':[4,14,8,8,9]})
        result = median_zero_normalization(data, normalize='samples')
        result
                a         b         c
            0 -1.333333  0.666667  0.666667
            1 -2.666667 -3.666667  6.333333
            2 -2.000000  0.000000  2.000000
            3 -2.333333 -0.333333  2.666667
            4 -2.000000 -2.000000  4.000000
    """
    if normalize in (None, 'samples'):
        # Subtract each row's median from that row.
        return data.sub(data.median(axis=1), axis=0)
    # Subtract each column's median from that column.
    return data.sub(data.median(axis=0), axis=1)
098ed48e1f4337353a662a53a93fd08169982be9
689,423
def sse_pack(event_id: int, event: str, data: int, retry: str = "2000") -> str:
    """Pack data in Server-Sent Events (SSE) format.

    Emits the four SSE fields (retry, id, event, data), one per line,
    terminated by the blank line that ends an SSE message.
    """
    fields = (
        f"retry: {retry}",
        f"id: {event_id}",
        f"event: {event}",
        f"data: {data}",
    )
    return "\n".join(fields) + "\n\n"
075d7f58248b72a5835c6b9b65332fbcb799f2a8
689,427
def _lyn(w): """ Returns the length of the longest prefix of ``w`` that is a Lyndon word. EXAMPLES:: sage: import sage.combinat.necklace as necklace sage: necklace._lyn([0,1,1,0,0,1,2]) 3 sage: necklace._lyn([0,0,0,1]) 4 sage: necklace._lyn([2,1,0,0,2,2,1]) 1 """ p = 1 k = max(w)+1 for i in range(1, len(w)): b = w[i] a = w[:i] if b < a[i-p] or b > k-1: return p elif b == a[i-p]: pass else: p = i+1 return p
eaec9d87f91b7d87282f7909d5e4dcb1b4ad1daa
689,429
def valid_int_id(int_id: int) -> bool:
    """Validates an ID value as a signed 32-bit integer used in ID fields
    in MySQL tables.

    :param int_id: ID number to validate
    :return: True or False, based on if the value converts to an int that
        falls inclusively between 0 and 2147483647
    """
    try:
        int_id_ = int(int_id)
    except (TypeError, ValueError):
        # Non-numeric input (including None) is invalid, not an error.
        # TypeError must be caught now that the old truthiness pre-check
        # is gone (int(None) raises TypeError, not ValueError).
        return False
    # Minimum value of a signed INT in MySQL/MariaDB is 0 and the
    # maximum value of signed INT type in MySQL/MariaDB is (2**31) - 1,
    # or 2147483647. Bug fix: 0 is now accepted, matching the documented
    # inclusive range (the old `if not int_id` guard wrongly rejected it).
    return 0 <= int_id_ <= (2**31 - 1)
1805073a675f12b846b8ca5544d602aaba57e232
689,434
def _build_localename(localetuple): """ Builds a locale code from the given tuple (language code, encoding). No aliasing or normalizing takes place. """ try: language, encoding = localetuple if language is None: language = 'C' if encoding is None: return language else: return language + '.' + encoding except (TypeError, ValueError): raise TypeError( 'Locale must be None, a string, or an iterable of two strings -- language code, encoding.' )
ed267f2e2ef81a7eeddea5a2363cd72c726ddecb
689,439
import torch
def train_model(model, optimizer, criterion, epochs, trainloaders, validloaders, device):
    """
    Trains a network with an optimizer according to a criterion over a number
    of epochs on data provided by trainloaders and validloaders using device.

    After every epoch the model is evaluated on the validation set and a
    progress line (train loss, validation loss, validation accuracy) is
    printed. Per-epoch average losses are accumulated in local lists but
    not returned.

    Args:
        model: torch.nn.Module to train (moved to `device` in place of the
            local reference).
        optimizer: optimizer stepping `model`'s parameters.
        criterion: loss function taking (log-probabilities, labels) — the
            accuracy computation applies torch.exp, so the model is expected
            to output log-probabilities (e.g. LogSoftmax + NLLLoss).
        epochs: number of passes over the training data.
        trainloaders: iterable of (images, labels) training batches.
        validloaders: iterable of (images, labels) validation batches.
        device: torch device string or object ('cpu'/'cuda').

    Returns None
    """
    # Push model to cpu or cuda depending on device
    model = model.to(device)
    # Prepare containers for losses
    train_losses = []
    valid_losses = []
    for e in range(epochs):
        # Set model to training
        model.train()
        # Prepare a variable for the training loss
        train_loss = 0
        for images, labels in trainloaders:
            # Push images and labels to device
            images = images.to(device)
            labels = labels.to(device)
            # Zero the optimizer's gradient
            optimizer.zero_grad()
            # Feedforward the images
            log_ps = model.forward(images)
            # Calculate the loss
            loss = criterion(log_ps, labels)
            # Backpropagate through the gradient
            loss.backward()
            # Optimize
            optimizer.step()
            # Keep track of the running_loss
            train_loss += loss.item()
        else:
            # NOTE: this is a for-else — the block runs once after the
            # training loop completes (every epoch, since there is no break),
            # so validation happens exactly once per epoch.
            # Prepare variables for validation loss and accuracy
            valid_loss = 0
            accuracy = 0
            # Turn off the gradient for this step as we aren't training and set model to evaluation
            with torch.no_grad():
                model.eval()
                for images, labels in validloaders:
                    # Push images and labels to device
                    images = images.to(device)
                    labels = labels.to(device)
                    # Feed the images forward through the model
                    log_ps = model.forward(images)
                    # Check loss and place in validation loss variable
                    valid_loss += criterion(log_ps, labels).item()
                    # Exponentiate to find the softmax
                    ps = torch.exp(log_ps)
                    # Select the top class
                    top_p, top_class = ps.topk(1, dim=1)
                    # Container for equality between model classification and actual labels
                    equals = top_class == labels.view(*top_class.shape)
                    # Calculate the accuracy
                    accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
            # Append training and validation losses to their containers
            train_losses.append(train_loss/len(trainloaders))
            valid_losses.append(valid_loss/len(validloaders))
            # Print loss results and accuracy
            print("Epoch: {}/{}.. ".format(e+1, epochs),
                  "Training Loss: {:.3f}.. ".format(train_losses[-1]),
                  "Test Loss: {:.3f}.. ".format(valid_losses[-1]),
                  "Test Accuracy: {:.3f}".format(accuracy/len(validloaders)))
    return None
af2e68b5694c7c49f936e79e53f50478ca9e3b9e
689,444
import math
def lcm(a: int, b: int) -> int:
    """Return the lowest common multiple of ``a`` and ``b``."""
    # Divide before multiplying; gcd(a, b) always divides a.
    return (a // math.gcd(a, b)) * b
e40e99b167bf8aa3cfeedf56c5c2082da5ce1498
689,445
import re
def validate_url(url, playlist=False):
    """
    Confirms the validity of the provided YouTube video/playlist URL

    Args:
        url: A YouTube video/playlist URL
        playlist: A boolean flag to determine playlist URL

    Returns:
        A bool indicating the validity of the provided URL
    """
    video_pattern = r'^(https?\:\/\/)?(www\.)?(youtube\.com\/watch\?v=([a-zA-Z0-9_\-]{11})|youtu\.?be\/([a-zA-Z0-9_\-]{11}))$'
    playlist_pattern = r'^(https?\:\/\/)?(www\.)?youtube\.com\/((playlist\?list=.*)|(watch\?list=.*&v=.*)|(watch\?v=[a-zA-Z0-9_\-]{11}&list=.*))$'
    pattern = playlist_pattern if playlist else video_pattern
    return bool(re.match(pattern, str(url)))
3d4242c45ba82a772231b5376149d4cf3211e17b
689,450
def _multiple_of_n(a, n_boundary): """Calculates ceil like operation to make 'a' to be multiple of n boundary.""" ceil_like = (a + n_boundary - 1) // n_boundary return ceil_like * n_boundary
5d61b57d9813703e3ece9e0a60109e5afc03b03e
689,454
def find(pred, iterable):
    """
    Return the first element of `iterable` for which `pred` is true,
    or None if no element matches.

    >>> find(lambda e: e.startswith('g'), ['alpha', 'beta', 'gamma', 'delta'])
    'gamma'
    >>> find(lambda e: e.startswith('p'), ['alpha', 'beta', 'gamma', 'delta']) is None
    True
    """
    return next((element for element in iterable if pred(element)), None)
f0a491e4c8dfce292b604193fde25f317b0978b8
689,455
def _softUpdateWrapper(wrapper, wrapped): """ Update a wrapper function to look like the wrapped function. Like functools.update_wrapper, but doesn't fail when attributes are not found. """ attrs = ['__name__', '__doc__'] for attr in attrs: if hasattr(wrapped, attr): setattr(wrapper, attr, getattr(wrapped, attr)) return wrapper
4042f7a5db73cab83a741dadbc2f475c56a05d50
689,460
def topological_sort(graph):
    """
    Performs a topological sort on the given graph.

    Code by Paul Harrison in the public domain.

    graph should be a dictionary mapping node names to lists of
    successor nodes. Nodes with no incoming edges are emitted first;
    processing order uses a stack (last discovered, first emitted).
    """
    # In-degree of every node (number of times it appears as a successor).
    indegree = {node: 0 for node in graph}
    for successors in graph.values():
        for succ in successors:
            indegree[succ] += 1

    # Start from nodes with no incoming edges; pop from the end (stack order).
    stack = [node for node in graph if indegree[node] == 0]
    ordering = []
    while stack:
        node = stack.pop()
        ordering.append(node)
        for succ in graph[node]:
            indegree[succ] -= 1
            if indegree[succ] == 0:
                stack.append(succ)
    return ordering
18d8247287e4c0bfb74eb597ff2d4cc6938e287c
689,462
def _convert_json_properties(properties): """ Convert a geojson expression of the properties of a sector into a list of properties. Properties are in a dictionary. The geojson has no object id field so we inject one. """ return [properties['AC_ID'], properties['AV_AIRSPACE_ID'], properties['AV_ICAO_STATE_ID'], properties['MIN_FLIGHT_LEVEL'], properties['MAX_FLIGHT_LEVEL'], properties['AV_NAME'], properties['SECTOR_TYPE'], '0']
ce6f90ea0a4cdc983b04da853b0ef16d35bec22c
689,463
def _group_by_labels(cbdbscan_topics): """Group all the learned cores by their label, which was assigned in the cluster_model. Parameters ---------- cbdbscan_topics : list of :class:`Topic` A list of topic data resulting from fitting a :class:`~CBDBSCAN` object. After calling .fit on a CBDBSCAN model, the results can be retrieved from it by accessing the .results member, which can be used as the argument to this function. It is a list of infos gathered during the clustering step and each element in the list corresponds to a single topic. Returns ------- dict of (int, list of :class:`Topic`) A mapping of the label to a list of topics that belong to that particular label. Also adds a new member to each topic called num_neighboring_labels, which is the number of neighboring_labels of that topic. """ grouped_by_labels = {} for topic in cbdbscan_topics: if topic.is_core: topic.num_neighboring_labels = len(topic.neighboring_labels) label = topic.label if label not in grouped_by_labels: grouped_by_labels[label] = [] grouped_by_labels[label].append(topic) return grouped_by_labels
43c8a1ed4eab9bd08f8bd4ac4f1df4b5ab1a0ef4
689,468