content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def get_task_args_and_kwargs(file_path):
    """Extract the task's args and kwargs strings from an email body file.

    The file is flattened into a single space-separated line and searched
    for the ``Task was called with args: (...) kwargs: {...}`` pattern.

    Returns:
        tuple: ``(args_str, kwargs_str)``, or ``(None, None)`` when the
        pattern is not present in the email.
    """
    with open(file_path, 'r') as handle:
        flattened = ' '.join(line.strip() for line in handle) + ' '
    match = re.search('Task was called with args: (\\(.*?\\)) '
                      'kwargs: ({.*?})', flattened)
    if match is None:
        # Can't parse arguments b/c they're not in the email.
        return None, None
    return match.group(1), match.group(2)
d27b170879b4612637c5669ed6e2700d56861d49
684,394
import yaml


def gen_lookup(file):
    """Build the lookup table between api-endpoints and Elasticsearch URLs.

    Reads the lod-api YAML configuration and produces a mapping of the form::

        {"/api/endpoint": "http://elasticsearchhost:port/index/doctype",
         "/resource": "http://elasticsearchhost:9200/resource/data",
         ...}

    returns: dict.
    """
    with open(file, 'r') as instream:
        config = yaml.safe_load(instream)

    lookup = dict()
    # Source indices carry their full URL in the config; only the trailing
    # slash needs stripping.
    for source, instance in config["source_indices"].items():
        lookup["/source/{}".format(source)] = instance.rstrip("/")

    # All remaining indices live on the configured ES host/port.
    base = "http://{h}:{p}".format(h=config["es_host"], p=config["es_port"])
    for ctx in config["indices"]:
        entry = config["indices"][ctx]
        lookup["/" + entry.get("index")] = "{h}/{i}/{t}".format(
            h=base, i=entry.get("index"), t=entry.get("type"))
    return lookup
a007a0be1c18b585b9ebf66b194e55458f764146
684,398
from typing import Any


def default_tile_extras_provider(hyb: int, ch: int, z: int) -> Any:
    """Default extras provider: no extras exist for any hyb/ch/z combination."""
    return None
7e7c77481c06e836a531406f18468fa0edad327b
684,400
from typing import Any, Dict


def copa_preformat_fn(ex: Dict[str, Any]) -> Dict[str, Any]:
    """Format a single COPA example for prompting.

    Args:
        ex: Raw example with byte-string fields ``premise``, ``question``,
            ``choice1``, ``choice2`` and an integer ``label`` (0 or 1).

    Returns:
        ex: Dict with an ``input`` prompt string and ``target_scores``
            mapping each decoded choice to its score (correct choice -> 1).
    """
    premise = ex['premise'].decode()
    question = ex['question'].decode()
    label = ex['label']
    prompt = f'{premise} What was the {question} of this?'
    scores = {
        # label == 0 -> choice1 is correct; label == 1 -> choice2 is correct.
        ex['choice1'].decode(): 1 - label,
        ex['choice2'].decode(): label,
    }
    return {'input': prompt, 'target_scores': scores}
0f156d2efe6c6609a133fcb69073022ad2af735a
684,402
def bfs(G, s, f):
    """
    Breadth-first search from ``s``, stopping when the goal is reached.

    :param G: The graph object (mapping node -> iterable of neighbours)
    :param s: The start node
    :param f: The goal node
    :return: The explored nodes, in visit order (goal excluded)
    """
    explored = []
    frontier = [s]
    # Track everything already enqueued: the original only tested membership
    # in `explored`, so a node reachable via two parents was enqueued (and
    # later appended to `explored`) twice.
    seen = {s}
    while frontier:
        v = frontier.pop(0)
        if v == f:
            # Goal reached: terminate instead of silently skipping it and
            # continuing to expand the rest of the graph.
            break
        explored.append(v)
        for w in G[v]:
            if w not in seen:
                seen.add(w)
                frontier.append(w)
    return explored
f63dc3aaa2c3f0d4ad09ddc1dd43acca29fe37d3
684,403
import copy


def norm_rda_deficit(norm_rda_arr):
    """
    Return a deep copy of the nutrient dicts with each ``value`` flipped to
    its deficit fraction: 0 means fully satisfied, 1 means not satisfied at
    all. The input list is left unmodified.
    """
    deficits = copy.deepcopy(norm_rda_arr)
    for nutrient in deficits:
        nutrient['value'] = 1 - nutrient['value']
    return deficits
dc0c06e92a97c281973d340dc6f0f541615344ee
684,407
def add_dicts(*args):
    """
    Merge the given dicts into a new dict.

    Raises:
        ValueError: if the same key appears in more than one input dict.
    """
    merged = {}
    for mapping in args:
        for key, value in mapping.items():
            if key in merged:
                raise ValueError("Duplicate key: %r" % key)
            merged[key] = value
    return merged
aba33e1b81ba4398f4a46672fc5086a6cc9cbcd2
684,408
import re


def get_confirm_lines(code):
    """
    Extract the bodies of ``Confirm``/``ensures`` statements from RESOLVE code.

    Each returned entry keeps its trailing semicolon, drops the leading
    keyword, and has every space removed.

    @param code: All code submitted to RESOLVE verifier
    @return: List of confirm/ensures statement bodies (semicolons kept,
             all spaces removed)
    """
    # Two capture groups, one per keyword; findall yields a pair per match
    # where the non-matching alternative's group is the empty string.
    pattern = "Confirm ([^;]*;)|ensures ([^;]*;)"
    lines = []
    for confirm_body, ensures_body in re.findall(pattern, code):
        body = confirm_body or ensures_body
        if body:
            lines.append(body.replace(" ", ""))
    return lines
53a0323c8d7c8559d3fb050a31b9b715fba5211e
684,411
def rotate_90(grid):
    """
    Rotate a grid by 90 degrees (counter-clockwise).

    Now works for rectangular grids too: the previous version indexed
    columns with ``enumerate(grid)`` (the *row* count), which mis-rotated
    or crashed on non-square grids.

    Args:
        grid: Grid (list of rows) to rotate.

    Returns:
        New grid after being rotated by 90 degrees; the input is untouched.
    """
    if not grid:
        return []
    # Transpose by reading each column across all rows...
    transposed = [[row[col] for row in grid] for col in range(len(grid[0]))]
    # ...then reverse the row order to turn the transpose into a rotation.
    return transposed[::-1]
7d8364492aef992cea0db75c7973686973736063
684,416
def get_single_key_value_pair(d):
    """
    Return the sole ``(key, value)`` pair of a one-element dictionary,
    checking that it really has exactly one entry.

    Parameters
    ----------
    d : dict

    Returns
    -------
    tuple
    """
    assert isinstance(d, dict), f'{d}'
    assert len(d) == 1, f'{d}'
    # Tuple-unpack the single item rather than building two lists.
    (key, value), = d.items()
    return key, value
7c3dcaf1198bceff01df0fcbf108520b8807bae5
684,418
def donner_carte(nombre:int, joueur:dict, paquet:list) -> bool:
    """Deal cards from the top of the deck into a player's hand.

    Args:
        nombre (int): number of cards to deal
        joueur (dict): player profile (cards are appended to ``joueur['jeu']``)
        paquet (list): the deck, consumed from the front

    Returns:
        bool: True when no error occurred
    """
    remaining = nombre
    while remaining > 0:
        joueur['jeu'].append(paquet.pop(0))
        remaining -= 1
    return True
8e5f483a33dc7d47dc02cbb80a7cd66f8286822e
684,420
def command(cmd, *parameters):
    """
    Helper function. Prints and returns the gprMax ``#<cmd>: <parameters>``
    line. ``None`` parameters are dropped from the output.

    Args:
        cmd (str): the gprMax cmd string to be printed
        *parameters: one or more values; any None values are ignored

    Returns:
        s (str): the printed string
    """
    kept = [str(p) for p in parameters if p is not None]
    try:
        s = '#{}: {}'.format(cmd, " ".join(kept))
    except TypeError as e:
        # Enrich the exception with the offending cmd/parameters before
        # re-raising so the caller can see exactly what failed.
        if not e.args:
            e.args = ('', )
        additional_info = "Creating cmd = #{} with parameters {} -> {} failed".format(cmd, parameters, kept)
        e.args = e.args + (additional_info,)
        raise e
    print(s)
    return s
5e0dbec3daf11708cb5235e2d08cfae88ef27abf
684,421
import inspect
import types


def find_classes_subclassing(mods, baseclass):
    """
    Given a module or a list of modules, inspect and find all classes which
    are a subclass of the given baseclass, inside those modules (and inside
    their directly nested modules).

    Returns:
        list: the matching class objects (may contain duplicates when a
        class is visible from several modules).
    """
    if not isinstance(mods, list):
        mods = [mods]

    # BUG FIX: the previous version called inspect.getmembers() on the
    # *list* object (which has no module members, so nested modules were
    # never found) and appended (name, module) tuples instead of modules.
    inner_mods = []
    for mod in mods:
        for _, submod in inspect.getmembers(
                mod, lambda obj: isinstance(obj, types.ModuleType)):
            inner_mods.append(submod)
    mods = mods + inner_mods

    def _is_match(obj):
        # Only real classes that subclass the requested base.
        return isinstance(obj, type) and issubclass(obj, baseclass)

    classes = []
    for mod in mods:
        for _, klass in inspect.getmembers(mod, _is_match):
            classes.append(klass)
    return classes
48ca14811909c9375afd19b7d1329af85db1fd41
684,425
def bdev_get_iostat(client, name=None):
    """Get I/O statistics for block devices.

    Args:
        name: bdev name to query (optional; if omitted, query all bdevs)

    Returns:
        I/O statistics for the requested block devices.
    """
    params = {'name': name} if name else {}
    return client.call('bdev_get_iostat', params)
a51b0bb8214e4c22bef1e54160bed4e5bfe8495f
684,426
def simplify_whitespace(name):
    """Collapse runs of whitespace in ``name`` to single spaces and trim ends."""
    if not name:
        # Preserve falsy inputs (None, '') unchanged.
        return name
    return ' '.join(name.split())
eafda9cc3da43f0327597e1159d13338984671e7
684,430
import toml


def data_from_toml_lines(lines):
    """
    Return a mapping of data from an iterable of TOML text ``lines``.

    For example::

        >>> lines = ['[advisory]', 'id = "RUST1"', '', '[versions]', 'patch = [">= 1"]']
        >>> data_from_toml_lines(lines)
        {'advisory': {'id': 'RUST1'}, 'versions': {'patch': ['>= 1']}}
    """
    document = "\n".join(lines)
    return toml.loads(document)
a82020a5defea4356ece2f7d01ed754feafafeae
684,431
def unames_are_equivalent(uname_actual: str, uname_expected: str) -> bool:
    """Determine if uname values are equivalent for this tool's purposes."""
    # Support `mac-arm64` through Rosetta until `mac-arm64` binaries are
    # ready: M1 Macs report x86_64 for environment-setup purposes, so the
    # expected arm64 uname is rewritten before comparing. That's intentional
    # and doesn't require any user action.
    is_arm_mac = "Darwin" in uname_expected and "arm64" in uname_expected
    if is_arm_mac:
        uname_expected = uname_expected.replace("arm64", "x86_64")
    return uname_actual == uname_expected
7b12db4bf89b924320e4ac0e12e1b19c76c4f3d4
684,434
def part_1b_under_equilb_design_ensemble_run_limit(job):
    """Check that the equilbrium design ensemble run is under it's run limit.

    Returns False when the limit has been reached (recording that on the
    job doc) or when the job doc does not carry the counters yet.
    """
    try:
        if (
            job.doc.equilb_design_ensemble_number
            >= job.doc.equilb_design_ensemble_max_number
        ):
            job.doc.equilb_design_ensemble_max_number_under_limit = False
            return job.doc.equilb_design_ensemble_max_number_under_limit
        return True
    except (AttributeError, KeyError):
        # BUG FIX: was a bare `except:` which also swallowed genuine
        # programming errors; only missing counters are expected here.
        return False
ac35846c8bc5e5cd35771bbf88c7d895802d39cc
684,441
def get_column(grid, column_index):
    """Return the column from the grid at column_index as a list."""
    column = []
    for row in grid:
        column.append(row[column_index])
    return column
220cb895fa42461a521aa9f54f7bbf240d84bd43
684,445
from typing import Tuple
import requests


def create_sq_project(sq_server: str, auth: Tuple, proj_id: str) -> str:
    """Create a public SonarQube project for the current project.

    Returns:
        The generated project name (``project-<proj_id>``).
    """
    project_name = f"project-{proj_id}"
    payload = {
        "name": project_name,
        "project": project_name,
        "visibility": "public",
    }
    requests.post(url=f"{sq_server}/api/projects/create", data=payload, auth=auth)
    return project_name
0811a097e2a9c6610da22cb4c7f1160f271142af
684,448
def fit(approach, train_dict):
    """
    Dummy "fit": no real model can be trained here, so the configured
    probability of complaint is returned as the model.

    Parameters
    ----------
    approach: dictionary
        Approach configuration coming from the experiment yaml
    train_dict: dictionary
        Train features/labels DataFrames (unused by this dummy fit).

    Return
    ------
    The complaint probability taken from the approach hyperparameters.
    """
    return approach['hyperparameters']['perc_complaint']
25a8a48c00ba1dda838264da60f3a3475b9b5ed4
684,449
def count_words(my_string):
    """
    Count the words in a string; '-', '+' and newlines also separate words.

    :param my_string: str - A string of words or characters
    :return: int - number of words
    :raises TypeError: if the input is not a string
    """
    if not isinstance(my_string, str):
        raise TypeError("only accepts strings")
    normalized = my_string
    for separator in ('-', '+', '\n'):
        normalized = normalized.replace(separator, " ")
    return len(normalized.split())
69860cb359c2150e52a3f6eda5b3090ab22fb07a
684,450
import re


def toSentenceCase(text):
    """
    Split camelCase boundaries with spaces and title-case the result.

    NOTE(review): despite the name, this yields Title Case ("fooBar" ->
    "Foo Bar"), not sentence case -- confirm intent before renaming.

    Args:
        text (string): Text to convert.

    Returns:
        (string): Converted version of given text.
    """
    spaced = re.sub(r"(?<=\w)([A-Z])", r" \1", text)
    return spaced.title()
e1c608befc50a9896aa67dba38b8aef8924540d0
684,451
import re


def readLinking(goldStdFile):
    """
    Reads a file containing Entity Linking output according to the KBP 2013
    format. Each line consists of 2 or 3 whitespace-separated fields::

        mention_id KB_ID (optional confidence, default is 1.0)

    Lines with fewer than 2 fields are skipped; fields past the third are
    ignored. IDs are NOT changed in any way (no case folding).

    Returns a pair of dictionaries:
        linking: KB_ID -> set of query_IDs
        el2kbid: query_ID -> list of (KB_ID or NIL ID, confidence) pairs,
                 highest confidence first
    """
    linking = dict()
    el2kbid = dict()
    for line in open(goldStdFile):
        fields = re.split(r"\s+", line.strip())
        if len(fields) < 2:
            continue
        mention, kb_id = fields[0], fields[1]
        # BUG FIX: the confidence read from a system file was kept as a
        # string while the default is a float, so `conf > best_conf` below
        # mixed str and float (TypeError on Python 3, and lexicographic
        # ordering regardless). Parse it to float.
        conf = float(fields[2]) if len(fields) == 3 else 1.0

        if mention in el2kbid:
            # More than one link for this mention: keep the earliest
            # highest-confidence link in front, append the rest.
            links = el2kbid[mention]  # list of (kb_id, conf) tuples
            _, best_conf = links[0]
            if conf > best_conf:
                links.insert(0, (kb_id, conf))
            else:
                links.append((kb_id, conf))
        else:
            el2kbid[mention] = [(kb_id, conf)]

        linking.setdefault(kb_id, set()).add(mention)
    return (linking, el2kbid)
70e4a4df885de7e9426704420a10e59b1daf0e2d
684,452
def _compute_nfp_uniform(l, u, cum_counts, sizes): """Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. cum_counts: the complete cummulative distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives. """ if l > u: raise ValueError("l must be less or equal to u") if l == 0: n = cum_counts[u] else: n = cum_counts[u]-cum_counts[l-1] return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
847e0141015ee3c5e07426420c14ef63f212dd8f
684,454
def class_counts(rows):
    """Counts the number of each type of example in a dataset.

    In our dataset format, the label is always the last column.
    """
    counts = {}  # label -> count
    for row in rows:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    return counts
7ef0ca8fe014e18248edd3d0111960dc431e3cf8
684,455
def is_pandigital(n: int) -> bool:
    """Determine if n is 0-to-9 pandigital (all ten digits appear)."""
    # A set of the digit characters suffices; sorting before building the
    # set added nothing.
    return len(set(str(n))) == 10
e324671b592c26118dc5a4308cc0501fb7511bbf
684,456
from typing import Any, Dict


def traverse_dict(d: Dict, key_path: str) -> Any:
    """
    Traverse nested dictionaries to find the element pointed-to by key_path.
    Key path components are separated by a ':' e.g. "root:child:a".

    Raises:
        TypeError: when traversal reaches a non-dict value before the path
            is exhausted.
        KeyError: when a path component is absent.
    """
    if type(d) is not dict:
        raise TypeError(f"unable to traverse into non-dict value with key path: {key_path}")

    # Extract one path component at a time. split() always returns at least
    # one element, so the old None/empty-list checks were dead code.
    components = key_path.split(":", maxsplit=1)
    key = components[0]
    remaining_key_path = components[1] if len(components) > 1 else None

    # BUG FIX: use a sentinel so a legitimately stored None value is
    # returned instead of being misreported as a missing key.
    missing = object()
    val: Any = d.get(key, missing)
    if val is missing:
        raise KeyError(f"value not found for key: {key}")
    if remaining_key_path is not None:
        return traverse_dict(val, remaining_key_path)
    return val
aa997f04229657996956faaec90299fe3a6c298f
684,458
def is_prime(n):
    """Decide whether a number is prime or not."""
    if n == 2:
        return True
    if n < 2 or n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    divisor = 3
    limit = n ** 0.5 + 1
    while divisor <= limit:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
429400f6ffe153f8548cf8ccb1c16efaccb75d35
684,459
def fluid_needed(molarity : float, mw : float , mass : float) -> float:
    """How much liquid is needed for a solution of the given molarity?

    moles = mass / molecular weight; volume = moles / molarity.
    """
    moles = mass / mw
    return moles / molarity
9f3823b0e43ac6a7455cc9ac347fb5341eccf6b1
684,460
from pathlib import Path


def get_backups_in_path(folder, file_type):
    """Recursively find all backups of a specified type in a path.

    Uses ``rglob`` to search for ``*.<file_type>*.backup``; pass an empty
    string to match backups of any type.

    :param folder: The path to search
    :param file_type: The type of files to search for
    :returns: Collection of backups
    :rtype: generator of PosixPath (as defined in pathlib)
    """
    pattern = "*.{}*.backup".format(file_type)
    return Path(folder).rglob(pattern)
cbe5569d5c520e54e55eeb99481877a73639b87e
684,463
def format_xi_stats(users_as_nodes, exp, xi_mean, xi_std, tot):
    """Formats the curvature estimates for logging.

    Args:
        users_as_nodes: Bool; True for a user-user interaction graph, False
            for item-item.
        exp: Bool; True when the interaction graph distances are on an
            exponential scale.
        xi_mean: Float mean of the curvatures of the sampled triangles.
        xi_std: Float standard deviation of the sampled curvatures.
        tot: Int total number of legal sampled triangles.

    Returns:
        String storing the input information in a readable format.
    """
    header = 'User-user stats:' if users_as_nodes else 'Item-item stats:'
    if exp:
        header += ' (using exp)'
    body = '{:.3f} +/- {:.3f} \n'.format(xi_mean, xi_std)
    footer = 'out of {} samples.'.format(tot)
    return header + '\n' + body + footer
77ad6c0cc534e21cf938a28a64e6bc1711caa27b
684,466
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Compute gcd(a, b) with Euclid's algorithm.

    Euclid's Lemma: d divides a and b, if and only if d divides a-b and b.

    >>> greatest_common_divisor(7, 5)
    1
    >>> greatest_common_divisor(121, 11)
    11
    """
    if a < b:
        a, b = b, a
    while a % b:
        a, b = b, a % b
    return b
f2e707a614e2c0b5fa73ce83fe79b1451fbdb910
684,469
def _build_xpath_expr(attrs) -> str: """ Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. """ # give class attribute as class_ because class is a python keyword if "class_" in attrs: attrs["class"] = attrs.pop("class_") s = " and ".join(f"@{k}={repr(v)}" for k, v in attrs.items()) return f"[{s}]"
b1e06e2982a4e8f745b87f25c89d50353c880e3e
684,471
def insert_symbol(symbol: str, fbid: str, dict: dict):
    """
    Modifies the dictionary in place, mapping ``symbol`` to the set of
    FlyBase IDs seen for it. Falsy symbols are ignored.

    :param symbol:str - A single symbol to insert into the dictionary.
    :param fbid:str - A single FlyBase ID.
    :param dict:dict - The dictionary reference to modify.
    :return: None
    """
    if not symbol:
        return None
    # setdefault covers both the first-seen and already-seen cases.
    dict.setdefault(symbol, set()).add(fbid)
    return None
2ec8c2915b7b716ca2fe1d6519dae6cc434f9566
684,472
import ntpath


def path_leaf(path):
    """ guaranteed filename from path; works on Win / OSX / *nix """
    head, tail = ntpath.split(path)
    # A trailing separator leaves tail empty; fall back to the head's leaf.
    if tail:
        return tail
    return ntpath.basename(head)
5c3bc4b4d172330da9cc553274e0eb58567dfbfe
684,477
import re def win_path_to_unix(path, root_prefix=""): """Convert a path or ;-separated string of paths into a unix representation Does not add cygdrive. If you need that, set root_prefix to "/cygdrive" """ path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\/\\\\]+(?:[^:*?"<>|]+[\/\\\\]+)*[^:*?"<>|;\/\\\\]+?(?![a-zA-Z]:))' # noqa def _translation(found_path): found = found_path.group(1).replace("\\", "/").replace(":", "").replace("//", "/") return root_prefix + "/" + found path = re.sub(path_re, _translation, path).replace(";/", ":/") return path
be9255d582a6879ed0dc651e3c7c4dd2942e7197
684,479
def entry_to_bytes(arch, entry: dict) -> bytes:
    """
    Pack a U-Boot command table *entry* (struct cmd_tbl_s) and return its
    representation in bytes.

    ``entry`` maps field names to integer (pointer/flag) values, in struct
    order: ``name`` (pointer to command name string), ``maxargs``,
    ``cmd_rep`` (autorepeat flag or function pointer, version dependent),
    ``cmd`` (``do_<command>`` function pointer) and ``usage`` (pointer to
    short usage text), plus the optional ``longhelp`` (present only when
    U-Boot was built with ``CONFIG_SYS_LONGHELP``) and ``complete``
    (``CONFIG_AUTOCOMPLETE``) fields, which are emitted only when present.

    The **arch** parameter supplies ``int_to_bytes`` so pointer values are
    packed according to the target's endianness.
    """
    packed = bytearray()
    for field in ('name', 'maxargs', 'cmd_rep', 'cmd', 'usage'):
        packed += arch.int_to_bytes(entry[field])
    for optional in ('longhelp', 'complete'):
        if optional in entry:
            packed += arch.int_to_bytes(entry[optional])
    return bytes(packed)
5cfaeee6bdd20af86e81b6245480f419a5c9a8aa
684,480
import requests
import json


def api_call(page, username, password):
    """
    Query one page of the DataSUS "leitos ocupacao" Elasticsearch API.

    Arguments:
        page: a string or integer with the number of the page to request
        username: a string with the username login information for the API
        password: a string with the password login information for the API

    Output:
        json_data: a dictionary containing the information for the
        requested page
    """
    url = ("https://elastic-leitos.saude.gov.br/leito_ocupacao/_search"
           "?from={}".format(str(page)))
    response = requests.get(url, auth=(username, password))
    return json.loads(response.text)
0aa1f8cb6354220ad3fe7e5660dc356921810f80
684,482
def ref(i):
    """Return a PDF indirect-object reference to object number ``i``.

    Note: the result is a *bytes* value (e.g. ``b'3 0 R'``), ready to be
    written into the PDF byte stream.
    """
    return b'%d 0 R' % i
252e45a33a7ddd46186222fdd513adb1daa926d3
684,486
def structure_metadata(structure):
    """Generate summary metadata (composition, formulas, volume, density)
    based on a structure object."""
    comp = structure.composition
    symbols = sorted({e.symbol for e in comp.elements})
    return {
        "nsites": structure.num_sites,
        "elements": symbols,
        "nelements": len(symbols),
        "composition": comp.as_dict(),
        "composition_reduced": comp.reduced_composition.as_dict(),
        "formula_pretty": comp.reduced_formula,
        "formula_anonymous": comp.anonymized_formula,
        "chemsys": "-".join(symbols),
        "volume": structure.volume,
        "density": structure.density,
    }
605634ee52ea917cb8074d975a118af48a4a47b4
684,491
def swap(L, i, j):
    """Swaps elements i and j in list L (in place); returns L."""
    L[i], L[j] = L[j], L[i]
    return L
9cdc2979050e8e6049cc75656324307757a28fcd
684,495
def parse_gav(gav, defaults=(None, None, None)):
    """Parses the given GAV as a tuple.

    gav
        the GAV to parse. It must be a string with two colons separating
        the GAV parts.
    defaults
        a triple of default coordinates substituted for empty parts.

    Returns: a triple with the parsed GAV.

    Raises: ValueError when the input does not have exactly three parts.
    """
    parts = gav.split(':')
    if len(parts) != 3:
        raise ValueError(f"Not a valid GAV pattern: {gav}")
    return tuple(part if part else fallback
                 for part, fallback in zip(parts, defaults))
280b8a38c4dffba7dac16f30c0418de7416727f7
684,498
def _get_indices(term, chunk): """Get indices where term appears in chunk Parameters ---------- term : str The token to look for in the `chunk` chunk : [str] A chunk of text in which to look for instances of `term` Returns ------- [int] Indices in `chunk` where `term` was found Examples -------- >>> term = 'a' >>> chunk = ['a', 'a', 'b', 'b', 'a'] >>> _get_indices(term, chunk) [0, 1, 5] """ return [i for i, token in enumerate(chunk) if token == term]
a9ae9f046e1266ec4fc96291d16c64ffb8a2e49a
684,499
import unicodedata


def meta_tostring(metadata):
    """Convert the metadata dictionary to a text header.

    The title becomes a markdown-style ``# <title>`` first line; every
    other key is emitted as ``key: value``. Output is NFC-normalized.
    """
    header = ['# {}\n'.format(metadata.get('title', ''))]
    header.extend(f'{k}: {v}' for k, v in metadata.items() if k != 'title')
    return unicodedata.normalize('NFC', '\n'.join(header) + '\n')
8863d2fa745c21e824181c113f26c22cc281a6b1
684,500
from typing import Any


def get_cameras_and_objects(config: dict[str, Any]) -> set[tuple[str, str]]:
    """Get cameras and tracking object tuples."""
    return {
        (cam_name, tracked)
        for cam_name, cam_config in config["cameras"].items()
        for tracked in cam_config["objects"]["track"]
    }
89fdb11eaacfc547cf500f94ef993efd9d339b93
684,502
def dt_to_camli_iso(dt):
    """Convert a datetime to an ISO string with a literal 'Z' suffix, as
    camlistore expects.

    NOTE(review): assumes ``dt`` is naive UTC -- an aware datetime would
    already carry an offset, making the appended 'Z' wrong. Confirm with
    callers.
    """
    return '{}Z'.format(dt.isoformat())
fe193c76d122f0195d511386ff3ba892b6c7b9aa
684,503
def floatOrNone(v, default=0.0, exctype=Exception):
    """Returns the float value of the given value, or default (which is
    normally 0.0) on error. Catches exceptions of the given exctype
    (Exception by default)."""
    try:
        result = float(v)
    except exctype:
        result = default
    return result
c0b1b0152cdc39678dc437f821054e2dd9f01a61
684,505
import torch


def get_mrr(indices, targets):
    """Mean Reciprocal Rank: average reciprocal rank of the target item
    among the model's top-k predictions for each session.

    Args:
        indices (Bxk): torch.LongTensor. top-k indices predicted by the model.
        targets (B): torch.LongTensor. actual target indices.

    Returns:
        mrr (float): the mrr score
    """
    expanded_targets = targets.view(-1, 1).expand_as(indices)
    hits = (expanded_targets == indices).nonzero()
    # The column index of each hit is its 0-based rank; +1 makes it 1-based.
    ranks = (hits[:, -1] + 1).float()
    reciprocal_ranks = torch.reciprocal(ranks)
    return torch.sum(reciprocal_ranks).data / expanded_targets.size(0)
ae177696d4b7afc24ae71e67896be1c727a32efa
684,506
def get_valid_results(df_res):
    """Splits up results into valid and invalid results.

    Valid rows are inside, resized, and have exactly two annotations (one
    pair of resized ellipses, per the properties computed in data.py);
    everything else is excluded/invalid.
    """
    is_valid = df_res['inside'] & df_res['resized'] & (df_res['num_annot'] == 2)
    return df_res.loc[is_valid], df_res.loc[~is_valid]
e5ee85899440512d4ca451f35928405aeae1d49b
684,510
import inspect from typing import Dict from typing import Any def _build_bool_option(parameter: inspect.Parameter) -> Dict[str, Any]: """Provide the argparse options for a boolean option.""" params = {"action": "store_false" if parameter.default is True else "store_true"} return params
e97e45743c45d9e4bc240555c0ec6fe01bd6a41a
684,513
def _get_default_image_id(module): """ Return the image_id if the image_id was specified through "source_details" or None. """ if "source_details" in module.params and module.params["source_details"]: source_details = module.params["source_details"] source_type = source_details.get("source_type") if not source_type: if "source_type" not in source_details: module.fail_json( msg="source_type required and must be one of: 'bootVolume', 'image'" ) if source_type == "image": return source_details["image_id"] return None
b151cacf948dc9d41f8010064781d634f31984ba
684,514
from pathlib import Path


def get_test_path_services(filename, data_type):
    """
    Gets the path of the filename for a given data type

    Args:
        filename (str): Name of file, not path
        data_type (str): Data type, CRDS, GC, ICOS etc
    Returns:
        pathlib.Path: Absolute path to object
    """
    # BUG FIX: the filename argument was previously ignored -- the path
    # ended in a literal "(unknown)" placeholder; include the filename.
    return (
        Path(__file__)
        .resolve()
        .parent.parent.parent
        .joinpath(f"data/proc_test_data/{data_type}/{filename}")
    )
f9a12216a21b18403d5331db94a05f5322f53fd9
684,515
import pkg_resources


def readfile(filename: str) -> str:
    """Read a file that is contained in the openclean_notebook package.

    This is a helper method to read Javascript files and HTML templates
    that are part of the openclean_notebook package.

    Parameters
    ----------
    filename: string
        Relative path to the file that is being read.

    Returns
    -------
    string
        The utf-8 decoded file contents.
    """
    raw = pkg_resources.resource_string(__name__, filename)
    return raw.decode('utf-8')
0a927ca89cc3ffb49c6f85328ff2cd830bfa8536
684,516
import string


def add_numbering(ax, i=0, loc=(0.8, 0.8), label='', style='APS',
                  numbering='abc', **kwargs):
    """
    Add panel numbering (a, b, c, ...) to an axis.

    Parameters
    ----------
    ax : matplotlib.pyplot.axis object
    i : int
        The 0-based panel index, e.g. i=0 -> (a)
    loc : tuple or list
        Position of label, relative to axis [x, y] where 0 <= x, y <= 1
        (values outside this limit are allowed but may result in labels
        outside the axis).
    label : string
        Override with custom label (skips automatic numbering).
    style : string
        If 'APS' or 'Nature' will use preset styles; otherwise used as a
        template string with ``{i}`` substituted by the generated label.
    numbering : string
        Which type of numbering:
        'abc' -> a, b, c; 'ABC' -> A, B, C; '123' -> 1, 2, 3;
        'roman' -> i, ii, iii; 'ROMAN' -> I, II, III
    **kwargs : string keyword arguments
        Forwarded to ``ax.text`` (e.g. {'color': 'red'}).

    Returns
    -------
    ax : matplotlib.pyplot.axis object
    """
    if not label:
        if i < 0 or i > 25:
            raise ValueError("i must be between 0 and 25 "
                             "(support for i>25 will come in future version)")
        roman_nums = ['i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix',
                      'x', 'xi', 'xii', 'xiii', 'xiv', 'xv', 'xvi', 'xvii',
                      'xviii', 'xix', 'xx', 'xxi', 'xxii', 'xxiii', 'xxiv',
                      'xxv', 'xxvi']
        # different formats:
        if numbering == 'abc':
            label = string.ascii_lowercase[i]
        elif numbering == 'ABC':
            label = string.ascii_uppercase[i]
        elif numbering == '123':
            # BUG FIX: was `label = i`, i.e. 0-based -- inconsistent with
            # the other schemes where i=0 maps to the first symbol.
            label = str(i + 1)
        elif numbering == 'roman':
            label = r'${}$'.format(roman_nums[i])
        elif numbering == 'ROMAN':
            label = r'{}'.format(roman_nums[i].upper())
        else:
            raise ValueError("numbering option not a recognized value")
    if style == 'APS':
        label = r'({})'.format(label)
    elif style == 'Nature':
        label = r'\textbf{{{i}}}'.format(i=label)
    else:
        label = style.format(i=label)
    ax.text(loc[0], loc[1], label, transform=ax.transAxes, **kwargs)
    return ax
af45e454fb12eb9270b065c311d7860d9997025f
684,517
import re


def normalize_tag_name(name):
    """
    Normalize an EC2 resource tag so it can serve as a shell environment
    variable name (regex [a-Z][a-Z0-9_]*). This function is not meant to
    handle all possible corner cases so try not to be stupid with tag
    naming.

    :param name: the tag name
    :return: a normalized, upper-cased version of the tag name
    """
    # Env var names cannot start with a digit.
    prefixed = "_" + name if name[0].isdigit() else name
    cleaned = re.sub('[^0-9a-zA-Z_]+', '_', prefixed)
    return cleaned.upper()
3ae1974d144a0927561ed13ac620be5e38e02786
684,519
def cmd_name(python_name):
    """Convert module name (with ``_``) to command name (with ``-``)."""
    return '-'.join(python_name.split('_'))
668f55611add8463b633e55f5657002cf35b7e81
684,521
def hash_string(s, is_short_hash=False):
    """Fowler–Noll–Vo (FNV-1) hash of ``s``.

    Uses the 64-bit variant by default and the 32-bit variant when
    ``is_short_hash`` is set. A result of 0 is remapped to 1 -- special
    case for our application (0 is reserved internally).
    """
    if is_short_hash:
        h, prime, mask = 2166136261, 16777619, 0xFFFFFFFF
    else:
        h, prime, mask = 14695981039346656037, 1099511628211, 0xFFFFFFFFFFFFFFFF
    for ch in s:
        h = ((h ^ ord(ch)) * prime) & mask
    if h == 0:
        h = 1
    return h
c6718943c4a2791e3b336a543099ef4cd5a68335
684,522
import csv


def create_mapping(path, src, dest, key_type=None) -> dict:
    """
    Create a dictionary between two attributes.

    path: CSV file with a header row.
    src: column used for keys (converted via ``key_type``, default int).
    dest: column used for values.
    """
    if key_type is None:
        key_type = int
    with open(path, 'r') as handle:
        rows = list(csv.DictReader(handle))
    return {key_type(row[src]): row[dest] for row in rows}
41c7c9230ffdced8ff8d27d4cf9ea0968febea3d
684,524
def set_axis(mode_pl):
    """Resolve the active axes for the current view orientation.

    Maps a taper-axis selector string (e.g. "RX-MY") to the index triple
    (Rotate Axis, Move Axis, Height Axis).

    Args:
        mode_pl: Taper Axis Selector string.

    Returns:
        Tuple of three integer axis indices.
    """
    axis_table = {
        "RX-MY": (0, 1, 2),
        "RX-MZ": (0, 2, 1),
        "RY-MX": (1, 0, 2),
        "RY-MZ": (1, 2, 0),
        "RZ-MX": (2, 0, 1),
        "RZ-MY": (2, 1, 0),
    }
    return axis_table[mode_pl]
9f3cd41bc48eb191d1bdb214ddc868fc5698f30b
684,526
import pickle


def load_pickle(pickle_path):
    """Deserialize and return the object stored in a .pkl file.

    Arguments
    ---------
    pickle_path : str
        Path to the pickle file.

    Returns
    -------
    out : object
        The unpickled Python object.
    """
    with open(pickle_path, "rb") as handle:
        return pickle.load(handle)
436496808be87c92fb6e46bcd4e110984ed64c85
684,530
def update_params(params, **kwargs):
    """Merge non-None keyword arguments into a parameter dict (in place).

    Parameters
    ----------
    params : dict
        Parameter dictionary to update.
    **kwargs :
        New arguments to add/update; entries whose value is None are skipped.

    Returns
    -------
    params : dict
        The same dictionary, updated.
    """
    # Fixed: use `is not None` instead of `!= None` (PEP 8; `!=` can be
    # fooled by custom __eq__), and iterate items() to avoid a second lookup.
    for key, value in kwargs.items():
        if value is not None:
            params[key] = value
    return params
2b2b8ccd68885b4f8f26267e13447a07feceffc3
684,532
def docker_push_age(filename):
    """Return the recorded timestamp of the last Docker Hub push.

    Reads a float from *filename*; a missing file yields 0.
    """
    try:
        with open(filename, "r") as handle:
            contents = handle.read()
    except FileNotFoundError:
        return 0
    return float(contents.strip())
68432194c3623d9586b7df2f671ff9e722a23e7a
684,536
def is_triangle(triangle: int):
    """Return True if *triangle* is a triangular number.

    Every triangular number satisfies t = n(n+1)/2; solving with the
    quadratic formula shows n is a positive integer exactly when
    (1 + 8t) is an odd perfect square.

    Fixed: uses integer math.isqrt instead of float ** 0.5, which loses
    precision (and gives wrong answers) for very large inputs.
    """
    from math import isqrt  # local import keeps the function self-contained
    discriminant = 1 + 8 * triangle
    root = isqrt(discriminant)
    return root * root == discriminant and root % 2 == 1
df568b8610b0e2cd99b06e91b0a73e3724e487a9
684,539
def make_write_file_block(content: str, outname: str,
                          directory: str = '/usr/local/etc/ehos/'):
    """Render a cloud-init ``write_files`` YAML entry.

    Args:
        content: text to embed as the file content
        outname: filename the YAML entry points to
        directory: directory the YAML entry points to

    Returns:
        write_file block for a yaml file (str)

    Raises:
        None

    Fixed: substitutions now use plain ``str.replace`` instead of ``re.sub``.
    With ``re.sub`` the content was used as a regex *replacement* string, so
    backslash sequences (``\\1``, ``\\g<...>``) in the content were
    interpreted instead of emitted verbatim.
    """
    template = ("- content: |\n"
                "{content}\n"
                "  path: {filepath}\n"
                "  owner: root:root\n"
                "  permissions: '0644'\n")
    # Pad every content line with 8 spaces to keep the YAML literal block valid.
    padded = ("        " + content).replace("\n", "\n        ")
    block = template.replace("{filepath}", "{}/{}".format(directory, outname))
    block = block.replace("{content}", padded)
    return block
e1c48cf17f2f1483a2f79d0fe9382215aa8d39b1
684,541
def flatten_dict(a_dict, parent_keys=None, current_parent_key=None):
    """Flatten nested dictionary keys into dotted paths.

    EG:
    >>> flatten_dict({"a": {"b": 1}})
    {'a.b': 1}

    NB: ``parent_keys`` and ``current_parent_key`` are internal recursion
    state and should not be supplied by callers.
    """
    collected = [] if parent_keys is None else parent_keys
    for key, value in a_dict.items():
        # Truthiness (not an `is None` check) matches the empty-prefix case.
        path = "%s.%s" % (current_parent_key, key) if current_parent_key else key
        if isinstance(value, dict):
            flatten_dict(value, parent_keys=collected, current_parent_key=path)
        else:
            collected.append((path, value))
    return dict(collected)
cdd69b1d5119d83c36266c13c5bec772ae138cb5
684,542
def filter(record):
    """Filter for testing: drop records whose "str" field equals "abcdef"."""
    if record["str"] == "abcdef":
        return None
    return record
cbdbbf519043417308d903c4af2afa7b06d8557b
684,544
def sort(source_list, ignore_case=False, reverse=False):
    """Return a sorted copy of *source_list* (the input is not modified).

    :param list source_list: The list to sort
    :param bool ignore_case: Optional. Specify True to ignore case (Default False)
    :param bool reverse: Optional. Specify True to sort in descending order (Default False)
    :return: The sorted list
    :rtype: list

    >>> sort( ['Bad','bored','abe','After'])
    ['After', 'Bad', 'abe', 'bored']
    >>> sort( ['Bad','bored','abe','After'], ignore_case=True)
    ['abe', 'After', 'Bad', 'bored']
    """
    sort_key = (lambda item: item.casefold()) if ignore_case else None
    return sorted(source_list, key=sort_key, reverse=reverse)
7ece5826c9b99b21fcd5d764a01b0e738fb1ae6c
684,550
def get_scene(videoname):
    """Extract the 4-character ActEV scene id from a video name."""
    after_marker = videoname.split("_S_")[-1]
    scene_token = after_marker.split("_", 1)[0]
    return scene_token[:4]
150aa651fa799b66c03fd1bc96d18745ffcb8007
684,553
def VtuFieldComponents(vtu, fieldName):
    """Return the number of components of the named point-data field."""
    point_data = vtu.ugrid.GetPointData()
    field_array = point_data.GetArray(fieldName)
    return field_array.GetNumberOfComponents()
9c2c997cf4ec3e417d1b7d2568fa7a2e9aee4e8d
684,554
def get_note_title(note):
    """Return the note's title, or '' when the note has none.

    Idiom fix: ``dict.get`` with a default replaces the manual
    membership test + indexing (same behavior, one lookup).
    """
    return note.get('title', '')
55c18473daf98b4a1fc2023a8ffd258b1e9df8bd
684,556
def merge(source, target):
    """Recursively merge *source* into *target* (modified in place).

    Dicts are merged key by key; lists are concatenated with the source
    items appended after the target's; any other source value overwrites.

    :param source: source dictionary
    :param target: target dictionary
    :return: target, after the merge
    """
    for key, src_value in source.items():
        if isinstance(src_value, dict):
            # A falsy placeholder (None, '', 0, ...) under this key is
            # replaced with a fresh dict so the recursion can merge into it.
            if key in target and not target[key]:
                target[key] = {}
            merge(src_value, target.setdefault(key, {}))
        elif key in target and isinstance(target[key], list) and isinstance(src_value, list):
            target[key].extend(src_value)
        else:
            target[key] = src_value
    return target
813b83fdc749bf3017e2e6cedece7ceb96651d4e
684,557
def get_type_and_tile(erow):
    """Return the OBSTYPE (lower-cased string) and TILEID from an exposure row.

    Args:
        erow, Table.Row or dict. Must contain 'OBSTYPE' and 'TILEID' keys.

    Returns:
        tuple: the lower-cased OBSTYPE and the TILEID value as stored.
    """
    obstype = str(erow['OBSTYPE']).lower()
    return obstype, erow['TILEID']
e2bf59c4b352865a55ce5a8bcaa2499811a92aaa
684,558
def inject(variables=None, elements=None):
    """Decorator factory for elements that accept function callbacks.

    :param variables: Variable names to inject into the function arguments.
    :param elements: Element names to inject into the function arguments.
    """
    def decorate(fn):
        # Attach the injection spec so the framework can inspect it later.
        fn.inject = {'variables': variables or [], 'elements': elements or []}
        return fn
    return decorate
9b7ad2679acb4e1a3e01b0191d0af96fb88cc2de
684,564
def increment(number):
    """Return the successor of *number* (i.e. number + 1)."""
    successor = number + 1
    return successor
030bbc9452e7c4d56fc29fcc53aadcf4a2ae6835
684,566
from typing import List, Tuple


def three_variables_large_mean_differences() -> List[Tuple[Tuple[float, float, float], float]]:
    """Fixture data describing three random variables per entry.

    Each entry is ((mean1, mean2, mean3), covariance_scale), where the scale
    bounds how large any pairwise covariance may be. Mean differences are
    always much larger than the covariances, which keeps the approximation
    very accurate.
    """
    cases = [
        ((1.0, 10.0, 20.0), 0.1),
        ((2.0, 9.0, 25.1), 1.0),
        ((0.1, 3.0, 5.0), 0.1),
    ]
    return cases
047f544dd15196bcb0494098451fc4f9c9edfc87
684,567
def pivot_genres(df):
    """Create a one-hot encoded matrix of categories per business.

    Doc fix: the old docstring described 'movieId'/'genre' columns, but the
    code actually pivots on 'business_id'/'categorie'.

    Arguments:
    df -- a DataFrame containing at least the columns 'business_id' and 'categorie'

    Output: a matrix containing 0 or 1 in each cell.
            1: the business has the category
            0: the business does not have the category
    """
    return df.pivot_table(index='business_id', columns='categorie',
                          aggfunc='size', fill_value=0)
ecf2754b0d1504cb207be04798d1bf3cd315c9d6
684,571
def check_error(http):
    """Report whether the response carries an HTTP client error (4xx).

    Parameters
    ----------
    http : addinfourl whose fp is a socket._fileobject

    Returns
    -------
    has_error : bool
        True when the status code is in the 400 series (a message is
        printed), False otherwise.
    """
    code = http.code
    if 400 <= code < 500:
        print("HTTP error {}. Make sure your PV exists.".format(code))
        return True
    return False
20ba1426758e68b196708ec3260ff1f3a86c2820
684,573
def bproj(M, P):
    """Project batched matrices as P^T M P.

    Args:
        M (torch.tensor): batched matrices, size (..., N, N)
        P (torch.tensor): projectors, size (..., N, K)

    Returns:
        torch.tensor: projected matrices, size (..., K, K)
    """
    projector_t = P.transpose(1, 2)
    return projector_t @ M @ P
5334ed8c045ade690aad954663edd2ef0dbc561c
684,574
def is_sequence(nums):
    """Return True if the numbers form an arithmetic sequence, e.g. 8, 11, 14, 17.

    Fewer than three values never count as a sequence.
    """
    ordered = sorted(nums)
    if len(ordered) < 3:
        return False
    gaps = [b - a for a, b in zip(ordered, ordered[1:])]
    # An arithmetic sequence has one constant gap between neighbours.
    return all(gap == gaps[0] for gap in gaps)
a824f7f890040178c6df1ba8c1c67b26069258ca
684,575
def PickUpTextBetween(text, startstr, endstr):
    """
    Pick up lines between lines having "startstr" and "endstr" strings

    :param str text: text data (iterated line by line)
    :param str startstr: start string; if empty, a blank line starts capture
    :param str endstr: end string; if empty, a blank line ends capture
    :return: captured text, one line per input line
    :rtype: str

    NOTE(review): the docstring originally claimed a list return, but the
    function accumulates and returns a single string.
    """
    rtext = ""; start = False
    for ss in text:
        # NOTE(review): s.replace('\n','') discards its result — a no-op;
        # left as-is to preserve behavior exactly.
        s = ss.strip(); s.replace('\n', '')
        if start:
            # Currently capturing: check whether this line ends the capture.
            if endstr != "":
                if s.find(endstr) >= 0:
                    start = False
            else:
                if s.strip() == "":
                    start = False
            # The line containing the end marker is not captured.
            if not start:
                continue
        if not start:
            # Not capturing: check whether this line starts the capture.
            if startstr != "":
                if s.find(startstr) >= 0:
                    start = True
            else:
                if s.strip() == "":
                    start = True
            # The line containing the start marker is not captured.
            if start:
                continue
        if start:
            rtext = rtext + ss + "\n"
    return rtext
c92cec3f445110a011dc1b885e1161ceb71d2342
684,579
from typing import Set
from pathlib import Path
import yaml


def _ci_patterns() -> Set[str]:
    """
    Return the CI patterns given in the CI config file.
    """
    repo_root = Path(__file__).parent.parent
    workflow_path = repo_root / '.github' / 'workflows' / 'ci.yml'
    workflow = yaml.safe_load(workflow_path.read_text())
    pattern_list = workflow['jobs']['build']['strategy']['matrix']['ci_pattern']
    patterns = set(pattern_list)
    # Guard against accidental duplicate patterns in the workflow file.
    assert len(pattern_list) == len(patterns)
    return patterns
e91289fb6e365f4d185e9c5bf650992cba7c74dd
684,583
def get_center(bounds):
    """
    Return the center coordinates of an element::

        from magneto.utils import get_center
        element = self.magneto(text='Foo')
        (x, y) = get_center(element.info['bounds'])

    :param dict bounds: Element position coordinates (top, right, bottom, left)
    :return: x and y coordinates of the element center
    """
    half_width = (bounds['right'] - bounds['left']) / 2
    half_height = (bounds['bottom'] - bounds['top']) / 2
    return bounds['right'] - half_width, bounds['bottom'] - half_height
050734d661b5cee6efcbd81407d08405123cdbad
684,585
def check_kwargs(input_kwargs, allowed_kwargs, raise_error=True):
    """Validate that the input ``**kwargs`` keys are allowed.

    Parameters
    ----------
    input_kwargs : `dict`, `list`
        Dictionary or list with the input keys.
    allowed_kwargs : `list`
        List with the allowed keys.
    raise_error : `bool`
        Raises TypeError if ``True`` and disallowed keys exist.
        If ``False``, returns the list of disallowed keys instead.

    Fixed: the error path previously called ``allowed_kwargs.sort()``,
    silently mutating the caller's list; the message now uses a sorted copy.
    """
    not_allowed = [key for key in input_kwargs if key not in allowed_kwargs]
    if raise_error:
        if len(not_allowed) > 0:
            raise TypeError("function got an unexpected keyword argument {}\n"
                            "Available kwargs are: {}".format(not_allowed,
                                                              sorted(allowed_kwargs)))
    else:
        return not_allowed
6d65e6f4b3f64e8ee1903b32907adb99e3bcf7df
684,587
def _optimize_ir(ctx, src):
    """Optimizes an IR file.

    The macro creates an action in the context that optimizes an IR file
    by invoking the rule's IR-optimizer tool on *src* and capturing its
    stdout into a new `.opt.ir` file.

    Args:
      ctx: The current rule's context object.
      src: The source file.

    Returns:
      A File referencing the optimized IR file.
    """
    entry = ctx.attr.entry
    # Only pass --entry when the attribute was set on the rule.
    args = ("--entry=" + entry) if entry else ""
    # "foo.ir" -> "foo.opt.ir" (strips the trailing "ir" before appending).
    opt_ir_file = ctx.actions.declare_file(src.basename[:-2] + "opt.ir")
    ctx.actions.run_shell(
        outputs = [opt_ir_file],
        # The IR optimization executable is a tool needed by the action.
        tools = [ctx.executable._ir_opt_tool],
        # The files required for optimizing the IR file also requires the IR
        # optimization executable.
        inputs = [src, ctx.executable._ir_opt_tool],
        # The optimizer writes the result to stdout; redirect it to the output.
        command = "{} {} {} > {}".format(
            ctx.executable._ir_opt_tool.path,
            src.path,
            args,
            opt_ir_file.path,
        ),
        mnemonic = "OptimizeIR",
        progress_message = "Optimizing IR file: %s" % (src.path),
    )
    return opt_ir_file
3d236c538afa3584b85de8574873eb34dbda4aa5
684,590
def AUlight(exclude=()):
    """Return the AU standard colors (light palette) as an RGB dict.

    Available colors: 'blue', 'purple', 'cyan', 'turquoise', 'green',
    'yellow', 'orange', 'red', 'magenta', and 'gray'.

    Keyword arguments:
    exclude -- tuple of color names to remove (default empty);
               an unknown name raises KeyError
    """
    palette = {
        'blue': (0, 61, 115),
        'purple': (101, 90, 159),
        'cyan': (55, 160, 203),
        'turquoise': (0, 171, 164),
        'green': (139, 173, 63),
        'yellow': (250, 187, 0),
        'orange': (238, 127, 0),
        'red': (226, 0, 26),
        'magenta': (226, 0, 122),
        'gray': (135, 135, 135),
    }
    for name in exclude:
        palette.pop(name)
    return palette
557ac5bc7d38ea059d0939e51c8b6063c9231782
684,594
def closest_power_2(x):
    """
    Returns the closest power of 2 that is less than x

    >>> closest_power_2(6)
    4
    >>> closest_power_2(32)
    16
    >>> closest_power_2(87)
    64
    >>> closest_power_2(4095)
    2048
    >>> closest_power_2(524290)
    524288
    """
    power = 1
    # Grow multiplicatively while the next power still stays below x.
    while power * 2 < x:
        power *= 2
    return power
ce5531c1bd264cd4496aca74498f7105c24eaaad
684,598
def stateful_flags(rep_restart_wait=None, quorum_loss_wait=None, standby_replica_keep=None):
    """Pack an integer bitmask of which stateful-service arguments were given.

    Bit 1: replica restart wait; bit 2: quorum loss wait;
    bit 4: standby replica keep duration.
    """
    bit_values = (
        (rep_restart_wait, 1),
        (quorum_loss_wait, 2),
        (standby_replica_keep, 4),
    )
    return sum(bit for value, bit in bit_values if value is not None)
b5ed2e1558dcc40e56700d7c32dcd7c07ee6c589
684,602
import random


def random_bool(probability=0.5):
    """Return True with the given probability.

    Args:
        probability: chance of returning True; must lie in [0, 1]
    """
    assert (0 <= probability <= 1), "probability needs to be >= 0 and <= 1"
    draw = random.random()
    return draw < probability
45b3ec3f15218df3da7b962301dc5174dbfa11c7
684,604
def seperate_messsage_person(message_with_person): """ Seperates the person from the message and returns both """ # Split by the first colon split_colon = message_with_person.split(":") # Get the person out of it and avoid the first whitespace person = split_colon.pop(0)[1:] # Stitch the message together again message = " ".join(split_colon) return person, message
b5a9549caec7a0083de7d3c980fca8b2a10560bb
684,605
from datetime import datetime
from dateutil import tz


def _epoch_to_datetime(epoch_seconds):
    """
    Converts a UTC UNIX epoch timestamp to a Python DateTime object.
    Values that are already datetimes are passed through unchanged.

    Args:
        epoch_seconds: A UNIX epoch value (or a datetime)

    Returns:
        DateTime: A UTC-aware DateTime representation of the epoch value
    """
    # Fixed: isinstance() instead of `type(x) == datetime`, so datetime
    # subclasses are passed through as well.
    if isinstance(epoch_seconds, datetime):
        return epoch_seconds
    return datetime.fromtimestamp(int(epoch_seconds), tz=tz.tzutc())
a4bab04e6e80698631e2200a9824a2befec29502
684,608
def list_arg(raw_value):
    """argparse type: split a comma-separated value into a list of strings."""
    text = str(raw_value)
    return text.split(",")
38e411618a0508c3639802fe67615aa29df8fcb2
684,609
def _get_nodes_without_in_edges(graph): """Get all nodes in directed graph *graph* that don't have incoming edges. The graph is represented by a dict mapping nodes to incoming edges. Example: >>> graph = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b']} >>> _get_nodes_without_in_edges(graph) ({'a'}, {'b': set(), 'c': set(), 'd': {'b'}}) :param graph: A dict mapping nodes to incoming edges. :return: The set of nodes without incoming edges and the graph with these nodes removed. """ nextlevel = set() for node, deps in graph.items(): if not deps or deps == {node}: nextlevel.add(node) filtered_graph = {} for node, deps in graph.items(): if node in nextlevel: continue filtered_graph[node] = \ {dep for dep in deps if dep not in nextlevel} return nextlevel, filtered_graph
367a7c136e795f768a96bb6b013a09f0fb6ed967
684,614
import re


def sentences(s):
    """Split text on '.' and clean each sentence to letters, digits,
    spaces and apostrophes, collapsing repeated spaces."""
    def _clean(part):
        part = re.sub(r"[^A-Za-z0-9 ']", " ", part)
        return re.sub(r"[ ]+", " ", part).strip()
    return [_clean(part) for part in s.split('.')]
7139c9d1595d12fc18e17a45c1cadafcb8f838f7
684,615
def generate_pents(limit):
    """Generate the pentagonal numbers strictly below *limit*.

    Fixed: the loop guard now uses integer floor division (//) like the
    appended values, instead of true division (/) which goes through a
    float and loses precision for very large n.
    """
    pents = []
    n = 1
    while n * (3 * n - 1) // 2 < limit:
        pents.append(n * (3 * n - 1) // 2)
        n += 1
    return pents
587cb390e8cc9f23d186e1a924453165520516a6
684,618
def _is_plain_value(dict_): """Return True if dict is plain JSON-LD value, False otherwise.""" return len(dict_) == 1 and '@value' in dict_
07a3d6167195ae7486a7e7a985f9b415829f7a42
684,622
def normalize_boolean(value):
    """
    Normalize a boolean value to a bool().

    Accepts actual bools and the strings "true"/"false" in any case with
    surrounding whitespace; anything else raises ValueError.

    Fixed: ``value.lower().strip()`` was computed twice (once per branch);
    it is now hoisted into a single local.
    """
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        normalized = value.lower().strip()
        if normalized == "true":
            return True
        if normalized == "false":
            return False
    raise ValueError("Cannot convert {} to boolean".format(value))
9dcc91e0e0806c7b1538cb18435857608e56d184
684,624
def full_rgiid(rgiid, region_id):
    """Assemble the full RGI v6 identifier from a region id and short rgi id."""
    return "RGI60-{}.{}".format(region_id, rgiid)
5fd2cd275c922cb84bb3d8ab3b2373ad240be160
684,626
def get_snx_host_ioc_context(indicator, ioc_type, threat_data):
    """
    Build the SlashNext IoC context dictionary for a host indicator.

    :param indicator: IoC value
    :param ioc_type: IoC type
    :param threat_data: Threat data from the SlashNext OTI cloud
    :return: SlashNext IoC context dictionary
    """
    context = {'Value': indicator, 'Type': ioc_type}
    # Map output keys to the corresponding threat-data fields.
    field_map = (
        ('Verdict', 'verdict'),
        ('ThreatStatus', 'threatStatus'),
        ('ThreatType', 'threatType'),
        ('ThreatName', 'threatName'),
        ('FirstSeen', 'firstSeen'),
        ('LastSeen', 'lastSeen'),
    )
    for out_key, src_key in field_map:
        context[out_key] = threat_data.get(src_key)
    return context
9b384614d3242899d39c5105bd565520f8926fca
684,627
def navigate_diagnosis(driver):
    """Locate and return the page's submit button element."""
    submit_selector = "input[type='submit']"
    return driver.find_element_by_css_selector(submit_selector)
61b917be9441d897de2e4653a3d07d1f9a4642c8
684,630