content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import inspect

def caller(n=1):
    """Return the name of the function n frames up the call stack.

    >>> caller(0)
    'caller'
    >>> def f():
    ...     return caller()
    >>> f()
    'f'
    """
    outer_frames = inspect.getouterframes(inspect.currentframe())
    return outer_frames[n][3]
df142a8416da526a3af5a00cd8d473ea35d6bfd5
683,922
import collections

def parse_result_ranks(stream):
    """Read a TREC-style results stream into a rank lookup.

    Each line must hold six whitespace-separated fields:
    ``qid, iter, docno, rank, sim, run_id``.

    Args:
        stream: iterable of result lines (e.g. an open file).

    Returns:
        collections.defaultdict: maps ``(qid, docno)`` to ``int`` rank;
        missing keys default to 0.
    """
    ranks = collections.defaultdict(int)
    # the original wrapped this in enumerate() but never used the index
    for line in stream:
        tid, _, docno, rank, _, _ = line.strip().split()
        ranks[(tid, docno)] = int(rank)
    return ranks
50679336dd9fe0596c091196d7eef5d3bb808775
683,924
def getFormURL(form_id):
    """Build the public Google Forms view URL for *form_id*."""
    base = 'https://docs.google.com/forms/d/%s/viewform'
    return base % (form_id, )
c206e89d7246be95586f51d363490e9124a56a16
683,925
import unicodedata

def to_ascii(s):
    """Strip accents from a string or UTF-8 bytes input.

    Decomposes to NFD and drops combining marks (category 'Mn'), so
    accented characters reduce to their base ASCII letters.
    """
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
fb86e21b66a1abd9acd144cbcb596a2d8835b748
683,927
import torch

def rejoin_props(datasets):
    """Merge the ``props`` dicts of several datasets into one.

    List-valued properties are concatenated; tensor-valued properties
    are joined with ``torch.cat`` along dim 0.

    Args:
        datasets (list): datasets, each exposing a ``props`` dict.

    Returns:
        new_props (dict): combined properties.
    """
    new_props = {}
    for dataset in datasets:
        for key, val in dataset.props.items():
            if key not in new_props:
                new_props[key] = val
                continue
            # isinstance instead of `type(val) is list` (accepts subclasses);
            # `+` instead of `+=` so the first dataset's own list, which was
            # stored by reference above, is never mutated in place.
            if isinstance(val, list):
                new_props[key] = new_props[key] + val
            else:
                new_props[key] = torch.cat([new_props[key], val], dim=0)
    return new_props
f0913486571ea14d5ecf0fab7e9021b7663f3ad7
683,931
def _l_str_ ( self ) :
    """Self-printout of the line as "Line3D(point, direction)".

    Built from self.beginPoint() and self.direction().
    NOTE(review): the doctest below is Python-2 style (`print line`),
    kept as found.

    >>> line = ...
    >>> print line
    """
    return "Line3D(%s,%s)" % ( self.beginPoint() , self.direction() )
816564e5d27a3a2afbc5f0173928dea06585f34a
683,933
def get_mesh_texture(bsp, mesh_index: int) -> str:
    """Return the name of the .vmt applied to ``bsp.MESHES[mesh_index]``.

    Follows the chain: mesh -> material sort -> texture data -> string table.
    """
    material_sort = bsp.MATERIAL_SORT[bsp.MESHES[mesh_index].material_sort]
    texture_data = bsp.TEXTURE_DATA[material_sort.texture_data]
    return bsp.TEXTURE_DATA_STRING_DATA[texture_data.name_index]
55db88564032771d2ba8b41513d1b55077809542
683,936
def verify_askingto_by_verify_y(actor, x, y, ctxt) :
    """Point action *y* at actor *x*, then verify it through the context.

    Note: *actor* is accepted for signature compatibility but not used here.
    """
    y.update_actor(x)
    verdict = ctxt.actionsystem.verify_action(y, ctxt)
    return verdict
09a089ab29042d371849efdcbe16ed6a15720b83
683,937
def create_document(bookmark):
    """Build the search-engine Document (a dict) for *bookmark*.

    None title/notes become empty strings; tag names are joined with ", ".
    """
    tag_names = [tag.name for tag in bookmark.tags]
    return {
        "id": str(bookmark.id),
        "title": bookmark.title or "",
        "notes": bookmark.notes or "",
        "tags": ", ".join(tag_names),
    }
a3af46f6827b725f949056de0372e2d55121874f
683,938
def bytes_find_single(x: bytes, sub: int, start: int, end: int) -> int:
    """Locate the first occurrence of byte *sub* in a slice of *x*.

    Compiling bytes.find compiles this function, when sub is an integer
    0 to 255; it is only intended to run in that compiled form.

    Args:
        x: The bytes object in which to search.
        sub: Single byte to look for, as an integer 0 to 255.
        start: Beginning of slice of x. Interpreted as slice notation.
        end: End of slice of x. Interpreted as slice notation.

    Returns:
        Lowest matching index within the slice, or -1 if not found.

    Raises:
        ValueError: The sub argument is out of valid range.
    """
    if not 0 <= sub <= 255:
        raise ValueError("byte must be in range(0, 256)")
    length = len(x)
    # normalize negative slice indices, clamping into [0, len(x)]
    if start < 0:
        start = max(start + length, 0)
    if end < 0:
        end = max(end + length, 0)
    end = min(end, length)
    for index in range(start, end):
        if x[index] == sub:
            return index
    return -1
9e94788a4d9b6c102e56cc95422b5f367754b22e
683,939
import csv

def get_data_X_y(data_dir, X=None, y=None):
    """Read driving_log.csv under *data_dir* and build X/y training pairs.

    Rows with speed (column 6) below 20 are discarded. For each kept row
    the center, left and right image paths are appended to X, and the
    steering angle (column 3) is appended to y three times: unchanged for
    the center camera, +0.3 for the left and -0.3 for the right.

    Args:
        data_dir: directory path prefix containing ``driving_log.csv``;
            joined by plain concatenation, so include a trailing slash.
        X: optional list to extend in place (default: a fresh list).
        y: optional list to extend in place (default: a fresh list).

    Returns:
        tuple: (X, y).
    """
    # The original used mutable default arguments (X=[], y=[]), which
    # leaked accumulated samples between calls; None sentinels fix that
    # while keeping in-place extension for callers passing their own lists.
    if X is None:
        X = []
    if y is None:
        y = []
    with open(data_dir + 'driving_log.csv') as fin:
        next(fin)  # skip the header row
        log = list(csv.reader(fin))
    for row in log:
        if float(row[6]) < 20:
            continue  # throw away low-speed samples
        # center, left and right images
        X += [row[0].strip(), row[1].strip(), row[2].strip()]
        # small angle added for the left camera, subtracted for the right
        y += [float(row[3]), float(row[3]) + 0.3, float(row[3]) - 0.3]
    return X, y
f906b9fdbb76b3eac59ece9ec1d6d56452dffce2
683,941
import math

def sigfig(number, places):
    """
    Round `number` to `places` significant digits.

    Parameters:
        number (int or float): A number to round.
        places (int): The number of significant digits to keep.

    Returns:
        A number
    """
    if number == 0:
        return 0  # log10 is undefined at 0; zero has no leading digit
    # Find the position of the most significant digit with log10. The
    # original counted characters before the decimal point, which broke
    # for |number| < 1: e.g. sigfig(0.012345, 2) rounded to 0.0 instead
    # of 0.012. For |number| >= 1 this formula matches the old behavior.
    msd = int(math.floor(math.log10(abs(number))))
    ndigits = places - msd - 1
    # A negative ndigits makes round() act left of the decimal point,
    # e.g. round(12345, -2) == 12300.
    return round(number, ndigits)
0aae9fff082b35a18418b2dae8c6b6787e3c947a
683,944
from datetime import datetime

def read_time_stamp (out):
    """Read a ``time.time()`` float from file *out* and format it.

    Args:
        out: path of a file whose entire content is a Unix timestamp.

    Returns:
        str: local-time string formatted as ``%Y-%m-%d %H:%M:%S``.
    """
    # `with` guarantees the handle is closed even if read() or the float
    # conversion fails; the original leaked the handle on error.
    with open(out, 'r') as st_hd:
        st = st_hd.read()
    stamp = datetime.fromtimestamp(float(st)).strftime('%Y-%m-%d %H:%M:%S')
    return(stamp)
1f87cc1b38b03d48d04547f22848a22e0d7b95d6
683,945
import json

def load_characters(path_to_backup):
    """Load saved characters from a JSON backup file.

    Returns the decoded content as a Python dict, or None when the file
    does not contain valid JSON (best-effort, errors are swallowed).
    """
    with open(path_to_backup, 'r') as backup_file:
        raw = backup_file.read()
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return None
f2570fbb0d719b2fe97dc8ea4fe605b99666d3cd
683,947
import hashlib

def file_hash(filename):
    """
    Hash the contents of a file with SHA-256, reading in 8 KiB chunks.

    @param filename The filename to hash the contents of
    @return Hex string of the SHA-256 digest of the file contents
    """
    digest = hashlib.sha256()
    with open(filename, 'rb') as infp:
        for chunk in iter(lambda: infp.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
933c79a20bf56a4953eff2b90ceec698db870ebf
683,948
def unionRect(rect1, rect2):
    """Return the total bounding rectangle of both input rectangles.

    Rectangles are (xMin, yMin, xMax, yMax) tuples; the result is the
    smallest rectangle fully enclosing both.
    """
    xMin1, yMin1, xMax1, yMax1 = rect1
    xMin2, yMin2, xMax2, yMax2 = rect2
    return (min(xMin1, xMin2), min(yMin1, yMin2),
            max(xMax1, xMax2), max(yMax1, yMax2))
4f81c8e0eeabe181ac9fab439d289ba6f2d923a6
683,953
def build_synthethic_iid_datasets(client_data, client_dataset_size):
    """Construct an iterable of IID client datasets from `client_data`.

    Pools all clients' examples into one global `tf.data.Dataset`, then
    yields windows of `client_dataset_size` examples sampled from that
    pool, approximating the statistical IID setting (any example may be
    produced by any client).

    Args:
        client_data: a `tff.simulation.ClientData`.
        client_dataset_size: size of each yielded `tf.data.Dataset`.

    Returns:
        A `tf.data.Dataset` yielding IID client datasets sampled from
        the global distribution.
    """
    pooled = client_data.create_tf_dataset_from_all_clients()
    # Bounded shuffle buffer (10,000 items max, limited by the input),
    # reshuffled on every pass so each window is a fresh sample.
    pooled = pooled.shuffle(buffer_size=10000, reshuffle_each_iteration=True)
    pooled = pooled.repeat(None)  # repeat forever
    return pooled.window(client_dataset_size)
8e9da5194d0249b837be78315875106227e5564b
683,958
def is_int(s: str) -> bool:
    """Return True if *s* parses as a base-10 integer, else False.

    Args:
        s (str): Input String

    Returns:
        bool: result of test
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
c454ba51fb0dec8cc1b9b4706849ee4c17995234
683,960
from pathlib import Path

def _new_file(file: Path) -> Path:
    """Return *file* with an additional ``.new`` extension appended."""
    extended_suffix = f"{file.suffix}.new"
    return file.with_suffix(extended_suffix)
e7adafdecbe4d7fdef53e188895de33ae3b589cd
683,961
def get_users_preferred_timezone(user, service):
    """Return the user's preferred timezone string, e.g. "Etc/GMT+8".

    Determined from the timezone configured on the primary Google
    calendar. *user* is accepted for signature compatibility; the lookup
    goes through *service* only.

    :param user: Django User object for the user whose calendar is accessed
    :param service: Google Calendar API service object
    :return: timezone string from the primary calendar
    """
    calendar = service.calendarList().get(calendarId='primary').execute()
    return calendar['timeZone']
3d5efdf3ad3bfe8acc68bf014d46e0fd83d492e5
683,967
def split_string(command:str, character:str):
    """Split *command* on *character*, guaranteeing at least two items.

    args:
        command: string that's the command.
        character: the character on which the command should be split.

    Returns:
        List of length >= 2; a missing second part becomes "".
    """
    parts = command.split(character)
    return parts if len(parts) > 1 else parts + [""]
fba0813cd318ffe3729399d5c4c87934b688f881
683,972
def offset_stopword(rank, nb_lemmas_in_stopwords):
    """Shift a word-frequency rank down to account for ignored stopwords.

    The result is floored at 1 so ranks never drop to zero or below.
    """
    adjusted = rank - nb_lemmas_in_stopwords
    return adjusted if adjusted > 1 else 1
322e00f8836efcfc4c10e26ca1a06325217118ce
683,973
def compute_loss(criterion, outputs, labels, batch_size,
                 height=128, width=128, num_classes=2):
    """
    Helper function to compute a pixel-wise prediction loss.

    The prediction and ground-truth tensors are reshaped into 2D
    (batch*height*width, num_classes) tensors before being passed to the
    loss criterion.

    Args:
        criterion: pytorch loss criterion
        outputs (pytorch tensor): predicted labels from the model;
            assumed shape (batch, num_classes, height, width) — TODO
            confirm against callers
        labels (pytorch tensor): ground truth labels, same shape
        batch_size (int): batch size used for training
        height (int): image height; default 128, the previously
            hard-coded value
        width (int): image width; default 128
        num_classes (int): classes per pixel; default 2

    Returns:
        pytorch tensor for loss
    """
    flat = batch_size * height * width
    loss_out = outputs.transpose(1, 3) \
        .contiguous() \
        .view([flat, num_classes])
    loss_lab = labels.transpose(1, 3) \
        .contiguous() \
        .view([flat, num_classes])
    return criterion(loss_out, loss_lab)
328053eefe13bcf54f374bd7ee6bbfb2d097c1b9
683,974
def poly_smooth(x: float, n: float = 3) -> float:
    """Polynomial ease-in/ease-out of a variable in range [0, 1].

    Args:
        x (float): variable to be smoothed (clamped to [0, 1])
        n (float, optional): polynomial degree. Defaults to 3.

    Returns:
        float: the eased value in [0, 1]
    """
    if x > 1:
        return 1
    if x < 0:
        return 0
    # ease-in below the midpoint, mirrored ease-out above it
    if x < 0.5:
        return 2 ** (n - 1) * x ** n
    return 1 - (-2 * x + 2) ** n / 2
a97defff3ea2cadb5842dd014fea5d77d621407d
683,975
import re

def del_tokens_len_one(text: str) -> str:
    """Delete word tokens of length 1 that are surrounded by whitespace.

    This is kind of a basic stopword filtering.

    The trailing whitespace is matched with a lookahead so it is not
    consumed: the original pattern ``(\\s)\\w(\\s)`` swallowed the space
    after a match and therefore skipped every other token in runs of
    consecutive single-character tokens (e.g. "x a b y" kept "b").
    """
    return re.sub(r'\s\w(?=\s)', '', text)
1d36830fc5fb5e7b4e5efb8e86135711f08a1ee2
683,976
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import List

def split(predicate: Callable, iterable: Iterable) -> Tuple[List, List]:
    """
    Partition *iterable* into two lists by *predicate*.

    Parameters
    ----------
    predicate: Callable
        A function taking an element of the iterable and returning
        True or False
    iterable: Iterable

    Returns
    -------
    (positives, negatives)
    """
    positives: List = []
    negatives: List = []
    for element in iterable:
        bucket = positives if predicate(element) else negatives
        bucket.append(element)
    return positives, negatives
50f65b7022c7beab456825f3da6806470610f9d9
683,977
from typing import List

def _entity_report(entity_name: str, messages: List[str]) -> List[str]:
    """
    Serialize one entity and its messages into report lines:
        + entity
        + message one
        + message two
    """
    header = [f' + {entity_name}']
    body = [f' + {message}' for message in messages]
    return header + body
5eb7ad0a648054ec5ef5e126a95fda357fadead8
683,983
def make_preterminal(label, word):
    """Build a preterminal tree node: a [label, word] pair."""
    node = [label, word]
    return node
ebf5858eb1704434a10524696ebdb73f72fbb982
683,984
def isVariable(name, dataset):
    """
    Return True if *name* is a valid variable name in *dataset*.

    Arguments:
        name -- candidate variable name to test
        dataset -- tecplot.data.dataset object in which "name" must be a
            variable name

    Returns:
        True/False depending on whether name is a valid variable name
        in dataset
    """
    try:
        # dataset.variable() raises for some invalid names and returns
        # None for others; both cases mean "not a variable".
        return dataset.variable(name) is not None
    except Exception:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit
        return False
764f357b71c120e72290a38f54ad639ebed5cc3f
683,988
def fgrep(text, term, window=25, with_idx=False, reverse=False):
    """Search a string for *term* and return it with surrounding context.

    Similar to `grep -C 1 term text`; `fgrep` is short for faux grep.

    Parameters
    ----------
    text: str
        Text to search.
    term: str
        Term to look for in text.
    window: int
        Number of characters of context kept around the match start.
    with_idx: bool
        If True, return index as well as string.
    reverse: bool
        If True, find the last match rather than the first.

    Returns
    -------
    str or tuple[int, str]: the context string ('' when the term is
    absent); with with_idx=True, a (match index, context) tuple.
    """
    finder = text.rfind if reverse else text.find
    idx = finder(term)
    res = '' if idx == -1 else text[max(idx - window, 0):idx + window]
    return (idx, res) if with_idx else res
8c8cc20ba6cfeba6006671e1b160a740179ef0ce
683,989
import re

def fix_gitlab_links(base_url, text):
    """
    Make relative gitlab upload links absolute by prefixing *base_url*.

    Matches markdown links of the form [label](/path) and rewrites the
    path to base_url + /path.
    """
    pattern = re.compile(r'(\[[^]]*\]\s*\((/[^)]+)\))')
    for whole_link, rel_path in pattern.findall(text):
        absolute = whole_link.replace(rel_path, base_url + rel_path)
        text = text.replace(whole_link, absolute)
    return text
2a101b899ed8d02d725539faf773ddd2ada6daea
683,992
def get_objlist(*, galaxy_catalog, survey, star_catalog=None, noise=None):
    """
    Get object lists and shifts, optionally combining the galaxy catalog
    with a star catalog.

    Parameters
    ----------
    galaxy_catalog: catalog
        e.g. WLDeblendGalaxyCatalog
    survey: descwl Survey
        For the appropriate band
    star_catalog: catalog, optional
        e.g. StarCatalog
    noise: float, optional
        Required when star_catalog is given

    Returns
    -------
    dict with keys 'objlist', 'shifts', 'star_objlist', 'star_shifts',
    'bright_objlist', 'bright_shifts', 'bright_mags'; the star/bright
    entries are None when no star catalog is supplied.
    """
    objlist, shifts = galaxy_catalog.get_objlist(survey=survey)

    star_objlist = star_shifts = None
    bright_objlist = bright_shifts = bright_mags = None
    if star_catalog is not None:
        assert noise is not None
        (star_objlist, star_shifts,
         bright_objlist, bright_shifts, bright_mags) = star_catalog.get_objlist(
            survey=survey, noise=noise,
        )

    return {
        'objlist': objlist,
        'shifts': shifts,
        'star_objlist': star_objlist,
        'star_shifts': star_shifts,
        'bright_objlist': bright_objlist,
        'bright_shifts': bright_shifts,
        'bright_mags': bright_mags,
    }
b71f8fcbebc9fa2aba00e92848882669d658b275
683,993
def clean_newline(text: str) -> str:
    """Remove every newline character from *text*.

    Arguments:
        text: The text to be filtered.

    Returns:
        The filtered text.
    """
    # a plain str.replace does the job; no regex machinery needed
    return text.replace("\n", "")
c229f59fe862b4120653f6df7c7dd8c20ab8090b
683,999
import copy

def thrift_to_dict(thrift_inst, update_func=None):
    """Convert a thrift instance into a dict of its attributes.

    :param thrift_inst: a thrift instance (or None)
    :param update_func: optional callable(dict, thrift_inst) that
        transforms the generated dict in place
    :return: dict with attributes as keys, or None for a None input
    """
    if thrift_inst is None:
        return None
    attrs = copy.copy(thrift_inst).__dict__
    if update_func is not None:
        update_func(attrs, thrift_inst)
    return attrs
9fd9febdbb0752e4bf6b7696d84fc074369d7406
684,000
def sum_reduce(iter, params):
    """
    Sum the values for each key over (key, value) pairs.

    Convenience function for a basic sum in the reduce step; *params*
    is unused but kept for the reduce-callback signature.
    """
    totals = {}
    for key, value in iter:
        if key in totals:
            totals[key] += value
        else:
            totals[key] = value
    return totals.items()
f848ea3c37778a63ad7b96bd489afd8b0112ba80
684,003
def _totalUniqueWords(dataset, index):
    """
    Count distinct space-separated words at *index* across all documents.

    GIVEN:
        dataset (list) list of lists, where each sublist is a document
        index (int) position in each document holding the text field
    RETURN:
        unique_words (int) total number of unique words in dataset
    """
    vocabulary = {word
                  for document in dataset
                  for word in document[index].split(" ")}
    return len(vocabulary)
25b834edd233e6c2f2c78f8f51e3baf08ab963d2
684,004
from typing import Dict
from typing import List

def get_parents(class_name: str, parent_mapping: Dict[str, List[str]]) -> List[str]:
    """
    Recursively resolve the full ancestry of *class_name*.

    :param class_name: class to resolve
    :param parent_mapping: mapping of class_name -> direct parents
    :return: List[str] of ancestor class names; may contain duplicates
        when ancestries overlap, and does not guard against cycles
    """
    ancestry = parent_mapping[class_name]
    for direct_parent in parent_mapping[class_name]:
        ancestry = ancestry + get_parents(direct_parent, parent_mapping)
    return ancestry
c7d4f757274e92fce06438047da35480f07eee94
684,005
import re

def matching(value, pattern, casesensitive=True):
    """
    Filter that performs a regex match against the start of *value*.

    :param value: Input source
    :type value: str
    :param pattern: Regex pattern to be matched
    :return: True if it matches, False otherwise
    :rtype: bool
    """
    flags = 0 if casesensitive else re.I
    return bool(re.match(str(pattern), str(value), flags))
2a54309730cd320e39d69db30a13e8cfead0524b
684,007
def collate_fn(batch):
    """Pack a batch: convert it to a tuple, items left untouched."""
    return tuple(item for item in batch)
f5bf743106933194440b0c35fe7c7c3d570cc468
684,008
import six

def get_members(group):
    """Get a list of member resources managed by the specified group.

    Resources in FAILED state are excluded; the rest are sorted first by
    created_time then by name.
    """
    if not group.nested():
        return []
    members = [r for r in six.itervalues(group.nested())
               if r.status != r.FAILED]
    members.sort(key=lambda r: (r.created_time, r.name))
    return members
a4925f1597faa3b149d60ae0f1b293a331a3fbd0
684,009
def get_albums(tracks):
    """
    Group track ids by album.

    Returns a dict where:
        key: album_id (the 'album' column with its first and last
             character stripped)
        value: list of track ids
    Rows whose stripped album is '' or 'None' are skipped.
    """
    albums = {}
    for _, row in tracks.iterrows():
        album = row['album'][1:-1]
        if album in ('', 'None'):
            continue
        albums.setdefault(album, []).append(row['track_id'])
    return albums
b4d56f1f75574b58f121d65e92a2e25d0febb2ec
684,011
import re

def has_spdx_text_in_analysed_file(scanned_file_content):
    """Return True if the ScanCode-analysed file text contains an SPDX identifier."""
    # the trailing colon is optional in the marker
    return re.search("SPDX-License-Identifier:?", scanned_file_content) is not None
2987ae45598bf911fbd85eddfec639d82aadcbe5
684,013
import torch

def clamp_tensor(tensor, minimum, maximum):
    """
    Clamp a dense or sparse tensor's values to [minimum, maximum].

    Supports sparse and dense tensors and returns a new tensor; the
    original is never modified. (The previous implementation used
    clamp_() on the coalesced values, which mutated the *input* whenever
    it was already coalesced — coalesce() returns the tensor itself in
    that case — contradicting its own docstring.)
    """
    if tensor.is_sparse:
        coalesced = tensor.coalesce()
        return torch.sparse_coo_tensor(
            coalesced.indices(),
            # out-of-place clamp leaves the input's values untouched
            coalesced.values().clamp(minimum, maximum),
            coalesced.size(),
        )
    return tensor.clamp(minimum, maximum)
fe0c1c3b4fcc7ec057354e8cbf9cfdd58a514752
684,014
def extract_picard_stats(path):
    """
    Extract the "## METRICS CLASS" section from a picard wgs or size
    stats file.

    Assumes a single sample, i.e. exactly two data lines (header row and
    values row) follow the marker before the first blank line. No effort
    is made to convert strings to numbers for the stat values.

    Args:
        path (str): path to the picard wgs stats file

    Returns:
        dict: keys as stat names and the values as stat values
    """
    rows = []
    in_metrics = False
    with open(path) as statsf:
        for line in statsf:
            if in_metrics:
                if not line.strip():
                    break  # first blank line ends the section
                rows.append(line.strip().split("\t"))
            elif line.startswith("## METRICS CLASS"):
                in_metrics = True  # data starts on the next line
    # expecting exactly a header row and a values row
    return dict(zip(rows[0], rows[1]))
8e8cec2b05a97817ece098635b875081a4fd89c8
684,017
def canAcceptVassal(masterTeam, vassalTeam, bAtWar):
    """
    Return True if <vassalTeam> can become a vassal of <masterTeam>.

    Pass True for <bAtWar> to test for capitulation and False to test
    for peaceful vassalage.
    """
    vassalID = vassalTeam.getID()
    if masterTeam.getID() == vassalID:
        return False
    # neither side may already be in a vassal relationship
    if masterTeam.isAVassal() or vassalTeam.isAVassal():
        return False
    # war status must match the kind of vassalage being tested
    if masterTeam.isAtWar(vassalID) != bAtWar:
        return False
    # master must possess tech
    return masterTeam.isVassalStateTrading()
11b19e0b533608c971f01eff9615499485bf779a
684,018
def unpack_bytes(word):
    """Unpack a 32-bit word into 4 signed byte values, low byte first."""
    def signed_byte(v):
        # keep the low 8 bits, then reinterpret as two's-complement
        v &= 0xff
        return v - 256 if v >= 128 else v
    return tuple(signed_byte(word >> shift) for shift in (0, 8, 16, 24))
dc00ff14bebcc72f90511fdfe641086674a3245f
684,019
def lowerBound(sortedCollection, item, key=lambda x: x):
    """
    Binary-search a sorted collection for the element x with the largest
    key(x) satisfying item > key(x), and return its index (-1 when no
    element qualifies).
    """
    low, high = 0, len(sortedCollection)
    while low < high:
        middle = (low + high) // 2
        if item > key(sortedCollection[middle]):
            low = middle + 1
        else:
            high = middle
    return low - 1
f74b44492180953c434e03a0b832444a7775fc52
684,023
import torch

def get_topk_class(probability, top_k):
    """
    Mark the top-k most probable classes per example for multi-label
    classification.

    :param probability: Tensor of (batch_size, class_size), probability
    :param top_k: int, number of classes to select per example
    :returns: long Tensor of (batch_size, class_size); 1 marks a
        selected class, 0 an unselected one
    """
    device = probability.device
    batch_size, class_size = probability.size()
    # (batch, k) indices of the k highest-probability classes
    top_indices = probability.topk(top_k, dim=-1, sorted=False)[1]
    row_indices = torch.arange(batch_size).unsqueeze(-1).expand(-1, top_k).to(device)
    selected = torch.zeros(batch_size, class_size).long().to(device)
    selected[row_indices, top_indices] = 1
    return selected
21b6fda6f1704f9d3c324c51b6dd48a297f1b834
684,024
def gt_pseudophase(g):
    """
    Return a pseudophased genotype call: the GT field's '/' separator is
    replaced with '|'; any other colon-separated fields are untouched.

    Parameters
    ----------
    g : str
        Genotype call.

    Returns
    -------
    str
        Pseudophased genotype call.

    Examples
    --------
    >>> gt_pseudophase('0/1')
    '0|1'
    >>> gt_pseudophase('0/0:34:10,24')
    '0|0:34:10,24'
    """
    fields = g.split(':')
    return ':'.join([fields[0].replace('/', '|')] + fields[1:])
de5478d09fa3f9955f48a5a6af5187b315431871
684,025
def get_users(user_file_path):
    """Read usernames from a file, one per line (newlines retained)."""
    # `with` closes the handle even if readlines() raises; the original
    # open/readlines/close sequence leaked the descriptor on error.
    with open(user_file_path) as user_file:
        return user_file.readlines()
ca98a42685d59f8d44f4b30139a0c1d4ac86c76e
684,026
def transform( event, settings ):
    """
    Transform a pipeline event into one carrying an array of payloads,
    one per configured adapter/outputter pair.

    Pass-through outputters serialize the raw message; all others
    serialize the parsed record.
    """
    payloads = []
    for index, adapter in enumerate(settings.adapters()):
        outputter = settings.outputters[index]
        source = event['message'] if outputter.passthru else event['record']
        payloads.append(adapter.dumps(source))
    return { 'bookmark': event['bookmark'], 'payloads': payloads }
cbec1b2d684531a0ec98496e9875589b730b7c29
684,027
def _strip_predicate(s):
    """Remove surrounding quotes and a trailing _rel suffix from predicate *s*."""
    # double quotes must be paired; a single quote is only stripped from the front
    if s.startswith('"') and s.endswith('"'):
        s = s[1:-1]
    elif s.startswith("'"):
        s = s[1:]
    # suffix check is case-insensitive
    if s[-4:].lower() == '_rel':
        s = s[:-4]
    return s
a3f2399ada296d69612f8403d5cfce8b2cc91c80
684,030
import math

def cosine_annealing_lr(
    iteration: int,
    num_iterations: int,
    initial_lr: float,
    final_lr: float,
):
    """
    Cosine annealing schedule, NO restarts.

    Args:
        iteration: current iteration
        num_iterations: total number of iterations of cosine lr
        initial_lr: learning rate to start
        final_lr: learning rate to end

    Returns:
        float: learning rate at *iteration*
    """
    progress = float(iteration) / float(num_iterations)
    # cosine factor decays from 1 at progress=0 to 0 at progress=1
    cosine_factor = 0.5 * (1 + math.cos(math.pi * progress))
    return final_lr + (initial_lr - final_lr) * cosine_factor
40a69f2a6495afe6e49447065993f16d19eb42b2
684,032
from typing import Union

def expand_fine_modality_questions(
    answer: str, matched_word: str, modality: Union[None, str]
):
    """Create new questions for the fine-modality task with given information.

    Args:
        answer: Original answer to the question
        matched_word: The keyword which labeled the original question as
            fine_modality
        modality: name of the scan type present in original question.

    Returns:
        dict of generated questions including the original question
    """
    binary, categorical = {}, {}
    # normalize bare scan names so generated questions read naturally
    if modality == "ct":
        modality = "ct scan"
    if modality == "pet":
        modality = "pet scan"
    # yes/no answers expand into binary questions ...
    if answer in ["yes", "no"]:
        if matched_word == "iv_contrast":
            binary["was iv_contrast given to the patient?"] = answer
        elif matched_word == "gi_contrast":
            binary["was gi_contrast given to the patient?"] = answer
        # a positive weighting answer pins all three weighting questions;
        # a negative one only rules out the matched weighting
        if ("t1" in matched_word) and answer == "yes":
            binary["is this a t1_weighted image?"] = "yes"
            binary["is this a t2_weighted image?"] = "no"
            binary["is this a flair image?"] = "no"
        if ("t1" in matched_word) and answer == "no":
            binary["is this a t1_weighted image?"] = "no"
        if ("t2" in matched_word) and answer == "yes":
            binary["is this a t1_weighted image?"] = "no"
            binary["is this a t2_weighted image?"] = "yes"
            binary["is this a flair image?"] = "no"
        if ("t2" in matched_word) and answer == "no":
            binary["is this a t2_weighted image?"] = "no"
        if ("flair" in matched_word) and answer == "yes":
            binary["is this a t1_weighted image?"] = "no"
            binary["is this a t2_weighted image?"] = "no"
            binary["is this a flair image?"] = "yes"
        if ("flair" in matched_word) and answer == "no":
            binary["is this a flair image?"] = "no"
        # contrast / noncontrast generate a complementary question pair
        if (matched_word == "contrast") and modality:
            binary[f"is this a noncontrast {modality}?"] = (
                "no" if answer == "yes" else "yes"
            )
            binary[f"was the {modality} taken with contrast?"] = (
                "yes" if answer == "yes" else "no"
            )
        if (matched_word == "noncontrast") and modality:
            binary[f"is this a noncontrast {modality}?"] = (
                "yes" if answer == "yes" else "no"
            )
            # NOTE(review): this f-string was line-wrapped mid-word in the
            # extracted source; reconstructed to match the parallel
            # question in the "contrast" branch above — confirm upstream.
            binary[f"was the {modality} taken with contrast?"] = (
                "no" if answer == "yes" else "yes"
            )
    # ... while free-form answers expand into categorical questions
    else:
        if matched_word == "contrast":
            categorical["what type of contrast did this patient have?"] = answer
        if ("t1" in answer) or ("t2" in answer) or ("flair" in answer):
            categorical["is this a t1_weighted, t2_weighted, or flair image?"] = answer
            categorical["is this image modality t1, t2, or flair?"] = answer
            if "t1" in answer:
                binary["is this a t1_weighted image?"] = "yes"
            elif "t2" in answer:
                binary["is this a t2_weighted image?"] = "yes"
            elif "flair" in answer:
                binary["is this a flair image?"] = "yes"
        else:
            # answer names no weighting at all: rule out all three
            binary["is this a t1_weighted image?"] = "no"
            binary["is this a t2_weighted image?"] = "no"
            binary["is this a flair image?"] = "no"
    return {"binary": binary, "categorical": categorical}
485f989103847f1bd0ae233bb8132544f15dc820
684,037
def _parse_volumes_param(volumes):
    """Parse blueprint Docker volume specs into docker-py form.

    See the `volumes` parameter of the `run` method at
    https://docker-py.readthedocs.io/en/stable/containers.html

    Args:
        volumes (list): entries shaped like
            {"host": {"path": <target path on host>},
             "container": {"bind": <target path in container>,
                           "mode": <read/write>}}

    Returns:
        dict mapping <target path on host> -> the "container" dict, or
        None when *volumes* is falsy.
    """
    if not volumes:
        return None
    return {entry["host"]["path"]: entry["container"] for entry in volumes}
cf5e608bdb44583e320d9f3928283504e80b9647
684,040
from typing import Dict
from typing import Optional

def role_has_tag(role: Dict, key: str, value: Optional[str] = None) -> bool:
    """
    Check whether an AWS role dictionary carries the specified tag.

    When `value` is passed, this only returns True if the tag's value
    also matches it.

    :param role: An AWS role dictionary (from a boto3 get_role or
        get_account_authorization_details call)
    :param key: key of the tag
    :param value: optional value of the tag
    :return: True if a matching tag exists
    """
    return any(
        tag.get("Key") == key and (not value or tag.get("Value") == value)
        for tag in role.get("Tags", [])
    )
086410b2c8fbf4b2fb0d5be44d9a5196181a4e6d
684,041
def get_datasets(datasets=''):
    """Split a comma-separated string into a list of dataset names.

    Args:
        datasets: A string of comma separated dataset names.

    Returns:
        A list of stripped dataset names; note that an empty input
        yields [''].
    """
    return [name.strip() for name in datasets.split(',')]
a0b520348978de25df2bfdefd123200a2f3d1145
684,043
import functools

def synchronized(obj):
    """
    This function has two purposes:

    1. Decorate a function that automatically synchronizes access to the
       object passed as the first argument (usually `self`, for member
       methods)
    2. Synchronize access to the object, used in a `with`-statement.

    Note that you can use #wait(), #notify() and #notify_all() only on
    synchronized objects.

    # Example
    ```python
    class Box(Synchronizable):
        def __init__(self):
            self.value = None
        @synchronized
        def get(self):
            return self.value
        @synchronized
        def set(self, value):
            self.value = value

    box = Box()
    box.set('foobar')
    with synchronized(box):
        box.value = 'taz\'dingo'
    print(box.get())
    ```

    # Arguments
    obj (Synchronizable, function): The object to synchronize access to,
        or a function to decorate.

    # Returns
    1. The decorated function.
    2. The value of `obj.synchronizable_condition`, which should implement
       the context-manager interface (to be used in a `with`-statement).
    """
    # NOTE: the attribute check comes first on purpose — a Synchronizable
    # instance may also be callable, and must take the with-statement path.
    if hasattr(obj, 'synchronizable_condition'):
        # with-statement usage: hand back the condition/lock object
        return obj.synchronizable_condition
    elif callable(obj):
        # decorator usage: wrap the call in the instance's condition
        @functools.wraps(obj)
        def wrapper(self, *args, **kwargs):
            with self.synchronizable_condition:
                return obj(self, *args, **kwargs)
        return wrapper
    else:
        raise TypeError('expected Synchronizable instance or callable to decorate')
29622cbef04c0da4a6e4921898d6c3f818888de0
684,044
import logging

def _set_root_logger(loglevel=logging.INFO):
    """
    Setup the root logger.

    Parameters
    ----------
    loglevel: int, optional
        The log level to set the root logger to. Default
        :attr:`logging.INFO`

    Returns
    -------
    :class:`logging.Logger`
        The root logger for Faceswap
    """
    root = logging.getLogger()
    root.setLevel(loglevel)
    return root
bc4d36685d271500fcda7a3743790976a6aa2f6a
684,045
def _recurse_binary_exponentiation(num, power):
    """
    Recursively compute num**power via binary exponentiation (squaring).

    Helper function: parameters were validated by the caller, so no
    checks are repeated in the recursive calls. Assumes power >= 1.
    """
    if power == 1:
        return num
    squared = num * num
    if power % 2:  # odd power: peel off one factor of num
        return num * _recurse_binary_exponentiation(squared, power // 2)
    return _recurse_binary_exponentiation(squared, power // 2)
96e2e95952c84078bcb211229496eb348110f720
684,046
from datetime import datetime

def today(tz=None) -> datetime:
    """Return today's date as a naive datetime with the time zeroed out."""
    current = datetime.now(tz)
    return datetime(current.year, current.month, current.day)
e8e921c74c92e17ffdbdc029c04905072ada7687
684,050
def _GetGetRequest(client, health_check_ref):
    """Return the (service, verb, request) triple fetching an existing health check."""
    request = client.messages.ComputeHealthChecksGetRequest(
        healthCheck=health_check_ref.Name(),
        project=health_check_ref.project)
    return (client.apitools_client.healthChecks, 'Get', request)
b81e33e663c8939831ca055fcf143e1d9d1ab96c
684,052
def exact_filter(query, model, filters):
    """Applies exact match filtering to a query.

    Returns the updated query.

    :param query: query to apply filters to
    :param model: model object the query applies to, for IN-style
                  filtering
    :param filters: dictionary of filters; values that are lists,
                    tuples, sets, or frozensets cause an 'IN' test to
                    be performed, while exact matching ('==' operator)
                    is used for other values
    """
    # Fix: the original iterated with six.iteritems (a Python-2 compat shim)
    # and its docstring falsely claimed the filters dict was mutated.
    filter_dict = {}
    if filters is None:
        filters = {}

    for key, value in filters.items():
        if isinstance(value, (list, tuple, set, frozenset)):
            # Collection value -> IN test against the model's column.
            column_attr = getattr(model, key)
            query = query.filter(column_attr.in_(value))
        else:
            # Scalar value -> collected for a single filter_by() call.
            filter_dict[key] = value
    if filter_dict:
        query = query.filter_by(**filter_dict)
    return query
b02748c0b46dfd03eca9de65d08ebace57baa8a2
684,053
def filter_dose_by_individual_species(doses, species):
    """Filter out the doses matching a given species.

    Compares each dose's ``individual_species.species_name`` against
    ``species.species_name``.

    :param doses: A list of dose objects
    :type list:
    :param species: the species object to match against
    :returns: a list of matching dose objects (empty when nothing
        matches — the previous docstring incorrectly claimed ``None``)
    """
    target = species.species_name
    return [dose for dose in doses
            if dose.individual_species.species_name == target]
125e0dbfe9f9a81ff6597c8e7db932dfb8b3d209
684,054
def check_bit(val, n):
    """Return the value (0 or 1) of the n-th (0-indexed) bit of ``val``.

    Returns -1 when the operands do not support the bitwise test
    (e.g. non-integer input).
    """
    try:
        mask = 2 ** n
        return 1 if val & mask else 0
    except TypeError:
        return -1
1c889ce569a4d5ed4236cb95c8ad700ad1aa4c20
684,060
def getAccurateFocalLengths(imageSize, focalLength, sensorSize):
    """Convert an exif focal length into pixels-per-meter focal lengths.

    Parameters: image size x,y (pixels), focalLength (millimeters),
    sensorSize x,y (meters).

    The focal length listed in image exif data is effectively unitless;
    scaling it by the sensor's pixels-per-meter density yields a usable
    (x, y) focal length.
    """
    width_px, height_px = imageSize
    width_m, height_m = sensorSize
    focal_m = focalLength / 1000.0  # millimeters -> meters
    return (width_px / width_m * focal_m, height_px / height_m * focal_m)
8dd408e9c4a5ce9df4d1c6c440484310d86c4089
684,063
import base64


def base_64_encode(key, secret):
    """Encode ``key:secret`` in base64 for use as a basic-auth credential."""
    raw = f"{key}:{secret}".encode('ascii')
    return base64.b64encode(raw).decode()
cf75ebc44e4232715029e3d0315d6609436d8bb6
684,064
from pathlib import Path


def get_or_create_path(path_string, parents=True):
    """Return a Path for ``path_string``, creating the directory if absent."""
    path = Path(path_string)
    if not path.exists():
        # parents=True also creates missing intermediate directories.
        path.mkdir(parents=parents)
    return path
fbfaccb60374fbbb67d7ae3ee242d455d2e3863c
684,065
def no_duplicates(seq):
    """Remove all duplicates from a sequence, preserving first-seen order."""
    # dict preserves insertion order (Python 3.7+), so fromkeys gives an
    # order-stable de-duplication in a single pass.
    return list(dict.fromkeys(seq))
47596bb28bf4634ff9c76a8b50f76e4301960447
684,069
import secrets


def draft_conv_key() -> str:
    """Create a random hex reference for a draft conversation."""
    # 10 random bytes -> 20 hex characters.
    return secrets.token_bytes(10).hex()
03d8b9c5dbc8530c25ee7b9d86bbcb185ad0d87a
684,074
def _errmsg(argname, ltd, errmsgExtra=''): """Construct an error message. argname, string, the argument name. ltd, string, description of the legal types. errmsgExtra, string, text to append to error mssage. Returns: string, the error message. """ if errmsgExtra: errmsgExtra = '\n' + errmsgExtra return "arg '%s' must be %s%s" % (argname, ltd, errmsgExtra)
de4072600a2624fa4ce0d4c213bc60d822143c41
684,076
def replace_chars(string, chars=r':\/|<>?*"', replacement=''):
    """
    Return `string` with any char in the `chars` replaced by `replacement`.

    Defaults to replace problematic/invalid chars for filenames/paths.
    """
    # Fix: single pass over the input. The previous implementation rebuilt
    # and re-scanned the whole string once per matching character, and could
    # also re-process text introduced by `replacement` itself.
    charset = set(chars)
    return ''.join(replacement if c in charset else c for c in string)
4a4af79cc960e1052be13958a733eb29a075ad12
684,078
import string
import secrets


def code_verifier(length: int = 128):
    """
    Return a cryptographically random code-verifier string as specified in
    RFC7636 section 4.1.

    See https://datatracker.ietf.org/doc/html/rfc7636#section-4.1

    :param length: length of the generated string, minimum 43, maximum 128.
        Defaults to 128
    :return:
    """
    # The unreserved-character alphabet permitted by RFC 7636.
    alphabet = string.ascii_letters + '-._~0123456789'
    return ''.join(secrets.choice(alphabet) for _ in range(length))
9e29a98fa9bf26f19a251098b4f656022d3de8ee
684,081
def crop(image, rectangle):
    """
    Crop out the fragment of ``image`` specified by ``rectangle``.

    :param image: input image (array indexable as image[y0:y1, x0:x1])
    :param rectangle: (x, y, w, h) tuple indicating the cropped area
    :return: cropped original image fragment
    """
    left, top, width, height = rectangle
    return image[top:top + height, left:left + width]
5e08f917878954a19141c631e79562f110a27b61
684,082
def lookup_newsletter_recipients(resource):
    """
    Callback function to look up the recipients corresponding to a
    distribution list entry (in this instance: send all newsletters
    to orgs)

    Args:
        the (filtered) resource

    Returns:
        a list of pe_ids of the recipients
    """
    table = resource.tablename
    if table == "cr_shelter":
        # Shelters resolve to their managing organisation's pe_id.
        rows = resource.select(["organisation_id$pe_id"], as_rows=True)
        return [row.org_organisation.pe_id for row in rows]
    if table == "org_organisation":
        rows = resource.select(["pe_id"], as_rows=True)
        return [row.pe_id for row in rows]
    return []
e68ddf37cb5b879fa91514fe21fa0f7e9ac153e5
684,083
def lex_tokenize(tokenized_sentence):
    """Return the lexes of a tokenizer.TokenizedSentence instance as a list
    of (start, end, token) 3-tuples."""
    return [(lex.begin, lex.end, lex.text)
            for _token, lex in tokenized_sentence.as_pairs()]
fe47f9a327d4e8ac7ad65394426172746d72f0f6
684,086
def ensure_list(config):
    """
    ensure_list

    Ensure that config is a list of one-valued dictionaries (or strings).
    This is called when the order of elements is important when loading
    the config file. (The yaml elements MUST have hyphens '-' in front
    of them).

    Returns config if no exception was raised. This mirrors
    ensure_dictionary's format and allows possible config file repairs
    in the future without breaking the API.
    """
    if not isinstance(config, list):
        raise TypeError("config is not a list. Did you forget some '-' "
                        "in your configuration file ?\n" + str(config))
    for entry in config:
        if isinstance(entry, str):
            continue
        # Anything else must be a dict carrying exactly one key.
        if not isinstance(entry, dict) or len(entry) != 1:
            raise ValueError("Parsing error in the configuration file.\n"
                             + str(entry))
    return config
56397e3eb6ab98d40392a668112febc77f11d9cc
684,087
from datetime import datetime


def json_serial(obj):
    """JSON serializer for objects not serializable by default json code.

    Datetimes are formatted as yyyy-MM-dd'T'HH:mm:ss.SSS
    (strict_date_hour_minute_second_millis); any other type raises
    TypeError.
    """
    # Fix: removed the dead, unused `tz_string = "Z"` local — no timezone
    # suffix was ever appended to the output.
    if isinstance(obj, datetime):
        # Truncate microseconds to milliseconds for the .SSS suffix.
        return "%s.%03d" % (obj.strftime("%Y-%m-%dT%H:%M:%S"),
                            obj.microsecond // 1000)
    raise TypeError("Type not serializable")
3f1b6daf4309a32d18a221be14c01662358c40b0
684,089
def generate_source(email):
    """Generate a source code to be used in links inside an email."""
    date_part = email.created_at.strftime('%Y%m%d')
    return f"ev_{date_part}_{email.uuid}"
d77a2f669b200d3ff66dc7eaff9ae7b192c37673
684,090
from pathlib import Path


def make_save_path(save_path, net_name, net_number, epochs):
    """Make a unique save path for the model and checkpoints, using the
    network architecture, training replicate number, and number of epochs.
    Creates the directory if it does not exist."""
    directory = (Path(save_path)
                 / f'trained_{epochs}_epochs'
                 / f'net_number_{net_number}')
    # exist_ok makes the call a no-op when the directory already exists.
    directory.mkdir(parents=True, exist_ok=True)
    filename = f'{net_name}_trained_{epochs}_epochs_number_{net_number}'
    return directory / filename
e4462c9e5d422008a79f8062cc446d069bff42e9
684,092
def alter_board(board, player, cell):
    """Return the board string with ``player``'s letter placed in ``cell``."""
    cells = list(board)
    cells[cell - 1] = player  # cell numbering is 1-based
    return ''.join(cells)
895b7e592f3ba530e4c8d554a5c07f5823b1b4b1
684,099
import torch


def cov(m, rowvar=False):
    """Estimate a covariance matrix given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        m: A 1-D or 2-D array containing multiple variables and observations.
            Each row of `m` represents a variable, and each column a single
            observation of all those variables.
        rowvar: If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.

    Returns:
        The covariance matrix of the variables.
    """
    if m.dim() > 2:
        raise ValueError('m has more than 2 dimensions')
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()
    # m = m.type(torch.double)  # uncomment this line if desired
    fact = 1.0 / (m.size(1) - 1)
    # Fix: center out-of-place. The previous `m -= mean` wrote through the
    # `t()` view (and through 1-D `view`s), silently mutating the caller's
    # input tensor.
    m = m - torch.mean(m, dim=1, keepdim=True)
    mt = m.t()  # if complex: mt = m.t().conj()
    return fact * m.matmul(mt).squeeze()
5b7d0c16741d94e80c4ae36c9b0c83e0c1ae5c21
684,100
def process_notebook_name(notebook_name: str) -> str:
    """Processes notebook name

    :param notebook_name: Notebook name by default keeps convention:
        [3 digit]-name-with-dashes-with-output.rst,
        example: 001-hello-world-with-output.rst
    :type notebook_name: str
    :returns: Processed notebook name: the three-digit prefix, a dot,
        then the dash-separated words with the trailing two segments
        (e.g. "with-output") and the file extension stripped.
        Example: 001-hello-world-with-output.rst -> "001.hello world"
        (note: no space is inserted after the dot).
    :rtype: str
    """
    return (
        notebook_name[:3]
        + "."
        + " ".join(notebook_name[4:].split(".")[0].split("-")[:-2])
    )
0ac2d9a768a4cf6eb26844336d49b2d3e91c159d
684,103
def integerize(num, count):
    """Multiply ``num`` by ``count`` and return an int when the product is
    a whole number, otherwise the product unchanged.

    Fix: the previous code called ``.is_integer()`` directly on the
    product, which raises AttributeError for int operands on
    Python < 3.12 (``int.is_integer`` only exists from 3.12 on).
    """
    calc = num * count
    if isinstance(calc, float) and calc.is_integer():
        calc = int(calc)
    return calc
7f45ca0ee30f49ab0d12df8f7641c7e3a32855f1
684,106
import requests


def get_redirected_url(url: str, timeout: float = 10.0) -> str:
    """
    Given a URL, return the URL it redirects to (the final URL after any
    redirects are followed).

    Args:
        url (str): the URL to resolve
        timeout (float): seconds to wait for the server; without a
            timeout, requests would block indefinitely on a stalled
            connection. Defaults to 10.
    """
    response = requests.get(url, timeout=timeout)
    return response.url
fb2b2a848ef528686e8c7e197dee2962544b3097
684,107
def _find_var_dictionary(table, dict_name=None, dict_type=None): ############################################################################### """Find and return a var_dictionary named, <dict_name> in <table>. If not found, return None""" var_dicts = table.find("var_dictionaries") target_dict = None if (dict_name is None) and (dict_type is None): raise ValueError(("At least one of <dict_name> or <dict_type> must " "contain a string")) # end if for vdict in var_dicts: if (((dict_name is None) or (vdict.get("name") == dict_name)) and ((dict_type is None) or (vdict.get("type") == dict_type))): target_dict = vdict break # end if # end for return target_dict
fcd8519dd3b2b26ae6d8794e5b67a30b08b8060b
684,109
def return_top(scores, metric, x):
    """
    Return the top ``x`` stocks ranked by ``metric``.

    :param scores: Pandas DataFrame with scores
    :type scores: Pandas DataFrame
    :param metric: String value for what score is desired
        ("Growth Score", "Value Score", "Momentum Score", "Score")
    :type metric: str
    :param x: Integer number of top stocks to return
    :type x: int
    :return: top x number of stocks by score as Pandas DataFrame
    :rtype: Pandas DataFrame
    """
    return scores.nlargest(x, columns=[metric])
b6dcdd3c89a21d285d35f2b15aba7c4a988c6da0
684,110
def CollateDeps(deps_content):
    """
    Take the output of deps_utils.GetDepsContent and return a hash of:

    { submod_name : [ [ submod_os, ... ], submod_url, submod_sha1 ], ... }
    """
    # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; .items() behaves identically on both.
    spliturl = lambda x: list(x.partition('@')[0::2]) if x else [None, None]
    submods = {}

    # Non-OS-specific DEPS always override OS-specific deps. This is an interim
    # hack until there is a better way to handle OS-specific DEPS.
    for (deps_os, val) in deps_content[1].items():
        for (dep, url) in val.items():
            submod_data = submods.setdefault(dep, [[]] + spliturl(url))
            submod_data[0].append(deps_os)
    for (dep, url) in deps_content[0].items():
        submods[dep] = [['all']] + spliturl(url)
    return submods
a3c43e14e45b5478bf2859c2d8ea7b7bac857d99
684,113
def _encode_none(name, dummy0, dummy1, dummy2): """Encode python None.""" return b"\x0A" + name
47bb28d3ecb4e16cf2314bf51d4495589939b38f
684,114
def scenegraph_to_json(sg):
    """
    Dump an "SceneGraph" object to a dict that's used for evaluation.
    The output will be saved as json.

    Args:
        sg (SceneGraph):

    Returns:
        dict: contains predictions for one image.
    """
    # For vg evaluation, all boxes should be in XYXY_ABS.
    return {
        "category_ids": sg.pred_classes.numpy().tolist(),
        "bboxes": sg.pred_boxes.tensor.numpy().tolist(),
        "scores": sg.scores.numpy().tolist(),
        "rel_scores": sg.rel_scores.numpy().tolist(),
        "rel_inds": sg.rel_inds.numpy().tolist(),
    }
2a39a81dfe6fcc14b8528c08bdad9521f35461db
684,120
import hashlib


def get_md5(file):
    """Get the md5 checksum of an input file.

    Arguments
    ---------
    file : str
        Path to file for which compute the checksum.

    Returns
    -------
    md5
        Checksum for the given filepath.

    Example
    -------
    >>> get_md5('samples/audio_samples/example1.wav')
    'c482d0081ca35302d30d12f1136c34e5'
    """
    chunk_size = 65536  # read in 64 KiB chunks
    digest = hashlib.md5()
    with open(file, "rb") as handle:
        # iter() with a b"" sentinel stops cleanly at end-of-file.
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
54386f58894bbebeaf280fa008ed7bcecc6c440e
684,121
def process_mmdet_results(mmdet_results, cat_id=0):
    """Process mmdet results, and return a list of bboxes.

    :param mmdet_results: detection output; a tuple carries the
        detections in its first element, otherwise the value is used
        directly
    :param cat_id: category id (default: 0 for human)
    :return: a list of detected bounding boxes
    """
    if isinstance(mmdet_results, tuple):
        detections = mmdet_results[0]
    else:
        detections = mmdet_results
    return detections[cat_id]
d4df0645bd8fb9af2d5500e84f5ece3aec933418
684,124
def assemble_subpeak_record(subpeak, celltype_activations, sequence):
    """
    Assemble the FASTA record of sequence and activation
    """
    # FASTA-style header: '>' followed by tab-joined subpeak fields.
    header = '>' + '\t'.join(subpeak)
    # Semicolon-separated "celltype score" pairs.
    activation = ';'.join(ct + ' ' + str(score)
                          for ct, score in celltype_activations)
    return header, activation, str(sequence)
5bddb16130dba20bb9d1c4f341d842371a71838a
684,125
from typing import Dict


def is_state_nncf(state: Dict) -> bool:
    """The function to check if state is the result of NNCF-compressed model."""
    meta = state.get("meta", {})
    return bool(meta.get("nncf_enable_compression", False))
bfa60e69eb54e287b44950edc0bdfafcaa15d1b5
684,127
def read_split(split_filename):
    """
    Return a list of pdb codes included in the split
    (one code per line, whitespace-stripped).
    """
    print('Reading split from file', split_filename)
    with open(split_filename, 'r') as handle:
        return [line.strip() for line in handle]
651f55c40c15ded4fc103ac37db4aa96891c5201
684,129
def clean_info(package_dict: dict) -> dict:
    """
    Only keep `name` and `home-page` keys.

    Arguments:
        package_dict: Package information.

    Returns:
        Cleaned-up information.
    """
    # KeyError propagates if either expected key is missing, matching the
    # strict lookup behavior.
    return {key: package_dict[key] for key in ("name", "home-page")}
e20bb748b9322d94e70e7b53f4ab76e2ab9f61ee
684,132
def extended_euclidean_algorithm(a, b):  # @param ax + by = gcd(a,b)
    """Solve ``a*x + b*y = gcd(a, b)`` and return ``(gcd, x, y)``.

    Based on the fact that:
    b % a = b - (b // a) * a\\
    gcd(a, b) = gcd(b%a, a)
    """
    if a == 0:
        return b, 0, 1
    # Coefficients for the reduced problem gcd(b % a, a) are rotated back
    # into coefficients for (a, b).
    gcd_val, x_sub, y_sub = extended_euclidean_algorithm(b % a, a)
    return gcd_val, y_sub - (b // a) * x_sub, x_sub
002bcc088eb065ee6c8718bb0e06b57aa80020f1
684,137
def cidade_pais(cidade: str, pais: str, populacao: int = 0) -> str:
    """Format a city name, country name and optional population.

    :param cidade: city name.
    :param pais: country name.
    :param populacao: population count; when zero/falsy it is omitted
        from the output.
    :return: a string of the form 'City, Country - população XXX', or
        just 'City, Country' when no population is given.
    """
    if populacao:
        return (f'{cidade.title()}, {pais.title()} '
                f'- população {populacao}')
    return f'{cidade}, {pais}'.title()
55933630a3bdcc8b6d845b101b5c67baa95f957a
684,138
def string_to_bits(str):
    """
    string_to_bits Function

    Converts a Pythonic string to the string's binary representation.

    Parameters
    ----------
    str: string
        The string to be converted.

    Returns
    -------
    data: string
        The binary representation of the input string.

    NOTE(review): code points are not zero-padded to 8 bits, so the
    concatenated output is not unambiguously reversible — confirm that
    callers expect this.
    """
    # Parameter name shadows the builtin `str`; kept unchanged for
    # interface compatibility.
    return ''.join(format(ord(ch), 'b') for ch in str)
2d86c9cf26b45adb81a0df37b5e0b7bcb5399f06
684,139
import torch
import math


def uniform_binning_correction(x, n_bits=8):
    """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).

    Args:
        x: 4-D Tensor of shape (NCHW)
        n_bits: optional.

    Returns:
        x: x ~ U(x, x + 1.0 / 256)
        objective: Equivalent to -q(x)*log(q(x)).
    """
    b, c, h, w = x.size()
    n_bins = 2 ** n_bits
    chw = c * h * w
    # Fix: add the dequantization noise out-of-place. The previous `x +=`
    # mutated the caller's tensor in place as a hidden side effect.
    x = x + torch.zeros_like(x).uniform_(0, 1.0 / n_bins)
    objective = -math.log(n_bins) * chw * torch.ones(b, device=x.device)
    return x, objective
22f1cfe6b66ac11788a15edb281b6dee4e213654
684,143