content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
# Default syndrome-extraction CNOTs (ancilla qubits 14-19 measure data
# qubits 0-13). Hoisted to module level so the function does not carry a
# mutable default argument.
_DEFAULT_SYNDROME_CNOTS = [
    [14, 0], [14, 2], [14, 4], [14, 6], [15, 1], [15, 2], [15, 5], [15, 6],
    [16, 3], [16, 4], [16, 5], [16, 6], [17, 7], [17, 9], [17, 11], [17, 13],
    [18, 8], [18, 9], [18, 12], [18, 13], [19, 10], [19, 11], [19, 12], [19, 13],
]


def get_delegated_OTP_keys(permutation, x_key, z_key, num_qubits=14, syndrome_cnots=None):
    """
    Get delegated, post-processed, classical one-time pad keys for a program.

    Parameters:
        permutation ([int]): permutation key
        x_key ([int]): X part of the non-delegated one-time pad key
        z_key ([int]): Z part of the non-delegated one-time pad key
        num_qubits (int): number of data qubits
        syndrome_cnots ([[int,int]]): all CNOT gates used to derive error
            syndromes; defaults to ``_DEFAULT_SYNDROME_CNOTS``

    Returns:
        delegated_x_key ([int]): classically processed and delegated X part
            of one-time pad key
        delegated_z_key ([int]): classically processed and delegated Z part
            of one-time pad key
    """
    if syndrome_cnots is None:
        syndrome_cnots = _DEFAULT_SYNDROME_CNOTS

    # Re-target each CNOT through the permutation key.
    permuted_cnots = [[control, permutation.index(target)]
                      for control, target in syndrome_cnots]

    new_x_key = x_key[:]
    new_z_key = z_key[:]
    # CNOT key update: X propagates control -> target, Z propagates
    # target -> control. Each assignment reads only untouched entries, so
    # the two updates are order-independent.
    for control, target in permuted_cnots:
        new_z_key[control] = new_z_key[control] + new_z_key[target]
        new_x_key[target] = new_x_key[control] + new_x_key[target]

    # Hadamard delegation: swap the X/Z key parts on the ancilla qubits
    # (indices num_qubits .. num_qubits + num_qubits/7*3 - 1).
    for i in range(num_qubits, num_qubits + int(num_qubits / 7 * 3)):
        new_x_key[i], new_z_key[i] = new_z_key[i], new_x_key[i]

    # Reduce accumulated sums mod 2 to obtain the classical pad bits.
    delegated_x_key = [k % 2 for k in new_x_key]
    delegated_z_key = [k % 2 for k in new_z_key]
    return delegated_x_key, delegated_z_key
35054ac76220fd332738a1e071aca769fd006e6c
684,912
import requests


def get_user_api_token(logger, username, password):
    """
    Generate iAuditor API Token.

    :param logger: the logger
    :param username: account username
    :param password: account password
    :return: API Token if authenticated else None
    """
    generate_token_url = "https://api.safetyculture.io/auth"
    # Pass the credentials as a dict so requests form-encodes them; the old
    # manual string concatenation broke when the username or password
    # contained reserved characters such as '&', '=' or '%'.
    payload = {
        "username": username,
        "password": password,
        "grant_type": "password",
    }
    headers = {
        'cache-control': "no-cache",
    }
    response = requests.post(generate_token_url, data=payload, headers=headers)
    if response.status_code == requests.codes.ok:
        return response.json()['access_token']
    logger.error('An error occurred calling ' + generate_token_url + ': ' + str(response.json()))
    return None
b123a8515a4aff1977608e68502caef497777c73
684,914
from typing import List
from typing import Any
import fnmatch


def get_elements_fnmatching(l_elements: List[Any], s_fnmatch_searchpattern: str) -> List[str]:
    """Return every string element matching the fnmatch search pattern.

    >>> get_elements_fnmatching([], 'a*')
    []
    >>> get_elements_fnmatching(['abc', 'def', 1, None], 'a*')
    ['abc']
    """
    if not l_elements:
        return l_elements
    return [
        element for element in l_elements
        if isinstance(element, str) and fnmatch.fnmatch(element, s_fnmatch_searchpattern)
    ]
f19665b68afc0a158cadbeb4abeac641fd3cb78c
684,916
def resolve_keywords_array_string(keywords: str):
    """Split a keywords string into a list of individual keywords.

    Args:
        keywords(str): keywords as one string, separated by ',' and/or ' '

    Returns:
        list of non-empty, stripped keywords
    """
    if keywords is None:
        return []
    # Normalise comma separators to spaces, then keep non-empty tokens.
    normalized = keywords.replace(",", " ")
    return [token for token in (part.strip() for part in normalized.split(" ")) if token]
d7a2e31c2202ec2080e50d6ceb9f2cb28bf72251
684,923
import re


def get_test_keys(data):
    """Return all case keys found in a report string.

    Args:
        data(str): test case results

    Returns:
        list[str]: contents of every ``<success>...</success>`` tag
    """
    pattern = re.compile('<success>(.*?)</success>')
    return pattern.findall(data)
cc762d02e25e93597c3b683a1ceee6b2105e6019
684,925
def get_return_assign(return_type: str) -> str:
    """Build the left-hand side of a return-value assignment.

    int foo() => 'int r ='
    void foo() => ''
    """
    stripped = return_type.strip()
    return '' if stripped == 'void' else '{} r ='.format(stripped)
4d89adb98104844f3af69650aa6583af5dcd4368
684,927
def get_state_root(spec, state, slot) -> bytes:
    """
    Return the state root at a recent ``slot`` from the state's history ring.
    """
    history_len = spec.SLOTS_PER_HISTORICAL_ROOT
    assert slot < state.slot <= slot + history_len
    return state.state_roots[slot % history_len]
ecb1333a5ca8e0958a08aa03ac84e8fa11987caa
684,931
def apply_uv_coverage(Box_uv, uv_bool):
    """Apply UV coverage to the data.

    Args:
        Box_uv: data box in Fourier space
        uv_bool: mask of measured baselines

    Returns:
        Box_uv masked by the baseline coverage
    """
    return Box_uv * uv_bool
0049c9ce213769895f8f9b3b5a62d9ccd35fec70
684,935
import torch


def box_cxcywh_to_xyxy(x: torch.Tensor):
    """Convert boxes from (center_x, center_y, w, h) to (x0, y0, x1, y1).

        w         ---------
    ---------     xy-------
    |       |     |       |
  h |  xy   | --> |       |
    |       |     |       |
    ---------     -------xy
    """
    cx, cy, w, h = x.unbind(1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return torch.stack(corners, dim=1)
0b4972ff121786e2347374476222903232a8fbae
684,941
def validate_extra(value: dict, context: dict = None) -> dict:
    """
    Default extra validator function. Can be overriden by providing a dotted
    path to a function in ``SALESMAN_EXTRA_VALIDATOR`` setting.

    Args:
        value (dict): Extra dict to be validated
        context (dict, optional): Validator context data. Defaults to None,
            treated as an empty dict. (A literal ``{}`` default would be a
            single mutable object shared across all calls.)

    Raises:
        ValidationError: In case data is not valid

    Returns:
        dict: Validated value
    """
    if context is None:
        context = {}
    return value
55b0996331c2dfafbebe62b882739f0f49dadc2c
684,947
def is_timezone_aware(value):
    """Check if a datetime is time zone aware.

    `is_timezone_aware()` is the inverse of `is_timezone_naive()`.

    :param value: A valid datetime object.
    :type value: datetime.datetime, datetime.time
    :returns: bool -- if the object is time zone aware.
    :raises: TypeError

    .. versionchanged:: 0.4.0
       ``TypeError`` is raised

    .. versionadded:: 0.3.0
    """
    if not hasattr(value, 'tzinfo'):
        message = "'{0}' object is not a valid time."
        raise TypeError(message.format(type(value).__name__))
    # Aware iff tzinfo exists AND it yields a concrete UTC offset.
    return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
49ce85512778b348f0d74df4e3e179ebe0ff00ed
684,952
def clear_object_store(
    securityOrigin: str, databaseName: str, objectStoreName: str
) -> dict:
    """Build the CDP command that clears all entries from an object store.

    Parameters
    ----------
    securityOrigin: str
            Security origin.
    databaseName: str
            Database name.
    objectStoreName: str
            Object store name.
    """
    params = {
        "securityOrigin": securityOrigin,
        "databaseName": databaseName,
        "objectStoreName": objectStoreName,
    }
    return {"method": "IndexedDB.clearObjectStore", "params": params}
f13e0f80d6c57bd624ca2d911af66d62026dcdee
684,955
def bytes_to_block(block_size: int, i: int) -> slice:
    """
    Return the slice covering bytes 0 through the end of block ``i``.

    :param block_size: The block size.
    :param i: The block index.
    :return: slice from 0 to the end of the specified block index.
    """
    end = block_size * (i + 1)
    return slice(0, end)
333816aa8c8e22c5715e1c585c4d9c34813d46f2
684,956
def extended_euclid_xgcd(a, b):
    """
    Return d, u, v = xgcd(a, b) where d = u*a + v*b = gcd(a, b).
    """
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    return old_r, old_s, old_t
e1bf4a26dff387e72b728d9871d34127e83b44ff
684,960
def sum_parameters(param):
    """
    Sum equation parameters sharing the same exponent and polynomial term:
    (a, ni, bi) and (c, ni, bi) become (a + c, ni, bi).

    Parameters
    -----------
    param: list
        list of tuples (Ai, ni, Bi)

    Returns
    ----------
    out: list
        list of tuples (Ci, ni, Bi).
    """
    # Accumulate coefficients keyed by the (ni, Bi) tail of each tuple.
    totals = {}
    for entry in param:
        key = entry[1:]
        totals[key] = totals.get(key, 0) + entry[0]
    return [(coeff, key[0], key[1]) for key, coeff in totals.items()]
be29ccc3894fe2f1d434d45e751459be09eda432
684,963
def is_continuation(val):
    """A byte is a UTF-8 continuation byte when its most significant bit is set."""
    return bool(val & 0b10000000)
807f94ee51b143c2e5dbe2ce3d5d7f43e7e4947f
684,968
def number_keys(a_dictionary):
    """Return the number of keys in a dictionary."""
    return len(a_dictionary)
600f77617563add2a5763aa05c3861ee3d067cf1
684,971
def true_false_converter(value):
    """
    Map the strings "True"/"False" to '1'/'0' for SQLite, which has no
    boolean type; any other value passes through unchanged. String output
    matches the formatting of other values in the input — a later stage of
    the pipeline converts to int.
    """
    return '1' if value == "True" else '0' if value == "False" else value
b81ac423f3b176e57cf9087c5bc241ff64fdcc85
684,973
def celcius_2_kelvin(x):
    """Convert a temperature from Celsius to Kelvin."""
    offset = 273.15
    return x + offset
8370fa6add469fc455c6a39b741462a4b8ee2e88
684,974
from typing import Dict
from typing import Union
from typing import List
from typing import Any
from functools import reduce


def getitems(obj: Dict, items: Union[List, str], default: Any = None) -> Any:
    """
    Recursively fetch a nested value from a dict.

    Note: when using a string key path, every key must be a string.

    :param obj: a dict
    :param items: list of keys, e.g. ['foo', 'bar'], or a dotted key path
        such as ".foo.bar" or "foo.bar"
    :param default: value returned when the path cannot be resolved
    :return: the resolved value, or *default*
    """
    if not isinstance(obj, dict):
        raise TypeError('Dict object support only!')
    keys = items.strip(".").split(".") if isinstance(items, str) else items
    current = obj
    try:
        for key in keys:
            current = current[key]
    except (IndexError, KeyError, TypeError):
        return default
    return current
c05c86ecab0bd1a4f83ac1cffbe23ca03f20b0d5
684,977
def compute_range(word_size, bits_per_sample):
    """
    Get starting positions in a word for groups of ``bits_per_sample`` bits.

    Notes
    -----
    **Example:**

    | word_size=32
    | bits_per_sample=4
    | list(compute_range(word_size,bits_per_sample))
    | >>> [0, 4, 8, 12, 16, 20, 24, 28]
    """
    return range(0, word_size, bits_per_sample)
96a05ce64d5aab89df9a50aba1fe91fe87a08b99
684,978
def convert_to_bool(x) -> bool:
    """Return True exactly when *x* is the string 'true'."""
    return "true" == x
bd207408f71559d7ee6f6df0adf4435a661eacdc
684,985
def _stretch_string(string, length): """Stretch the game title so that each game takes equal space. :param string: the string to stretch :type string: str :param length: the length that the string needs to be stretched to :type length: int :return: the stretched string :rtype: str """ for _ in range(length-len(string)): string += " " return string
91cf22f4fbe5dbf251b11da86b08cbc9ae12ff56
684,987
import json


def load_json_file(jfile):
    """Load and return the JSON document stored in the file *jfile*."""
    with open(jfile) as handle:
        return json.load(handle)
b86153750eea0dbd9013fe718087dca6a5ce8426
684,988
def is_vowel(char: str) -> bool:
    """
    Return True if the first character is a vowel, else False.

    >>> is_vowel('A')
    True
    >>> is_vowel('e')
    True
    >>> is_vowel('f')
    False
    """
    # Empty string: nothing to test.
    if not char:
        return False
    return char[0] in "AEIOUaeiou"
44d5fa1f5030496daf8dccafaffff03cb8e1d21c
684,992
def bisect_map(mn, mx, function, target):
    """
    Binary-search for the input whose function value equals *target*.

    ``function`` takes a single-element list and returns a single-element
    list, and decreases monotonically over [mn, mx].

    Fixes two Python 3 bugs in the original: the boundary guard compared
    the returned *list* against the scalar target (TypeError), and the
    midpoint used true division, producing a float index.

    Returns
    -------
    The input m with function(m) == target; if there is no exact solution,
    the value k such that function(k) <= target < function(k+1), similar to
    bisect_left. Returns -1 if even the boundary values cannot satisfy it.
    """
    # Guard: target must lie within the (decreasing) range of the function.
    if function([mn])[0] < target or function([mx])[0] > target:
        return -1
    while True:
        if mx == mn + 1:
            return mn
        m = (mn + mx) // 2  # integer midpoint; '/' would yield a float
        value = function([m])[0]
        if value > target:
            mn = m
        elif value < target:
            mx = m
        else:
            return m
549a8be718f9cb8fbcb750fd4e8d59d5ddefdee3
684,993
def b(s):
    """bytes/str/int/float -> bytes"""
    if isinstance(s, bytes):
        return s
    if isinstance(s, (str, int, float)):
        # Stringify non-bytes scalars, then encode as UTF-8.
        return str(s).encode("utf-8")
    raise TypeError(s)
ea017005461b1c559ab7b1efe82315a7f7cb8f71
684,995
def p2q(plist, do_min=1, verb=1):
    """convert list of p-value to a list of q-value, where
           q_i = minimum (for m >= i) of N * p_m / m
       if do_min is not set, simply compute q_i = N*p_i/i

       return q-values in increasing significance
       (i.e. as p goes large to small, or gets more significant)
    """
    q = sorted(plist)
    N = len(q)
    running_min = 1
    # Walk indices from N down to 1 (list position i-1).
    for i in range(N, 0, -1):
        pos = i - 1
        q[pos] = N * q[pos] / i
        if do_min and q[pos] < running_min:
            running_min = q[pos]
        # Clamp at the running minimum (this check mirrors the original
        # and applies regardless of do_min).
        if running_min < q[pos]:
            q[pos] = running_min
    q.reverse()
    return q
2b54f520adfa57452fbe232bd1707d88eaf52e7c
684,998
from pathlib import Path


def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    init_files = Path(package).glob("**/__init__.py")
    return [str(init_file.parent) for init_file in init_files]
6c568fc47122be49ed1a47325465deaf95056bff
685,001
import base64


def to_base64(full_path):
    """
    Return the base64 content of the file path

    :param full_path: Path to the file to read
    :type full_path: str
    :return: File content encoded in base64
    :rtype: str
    """
    with open(full_path, "rb") as bin_file:
        raw = bin_file.read()
    return base64.b64encode(raw).decode("ascii")
2d56464c8a886a4ad9240570fceeb9724762cc64
685,004
import time


def get_timestamp(length=13):
    """get current timestamp string

    >>> len(str(int(get_timestamp(10))))
    10

    :param length: length of timestamp, can only between 0 and 16
    :return: integer timestamp truncated to *length* digits
    """
    if not (isinstance(length, int) and 0 < length < 17):
        raise ValueError("timestamp length can only between 0 and 16.")
    # time.time() gives seconds.microseconds; drop the dot and truncate.
    digits = "{:.6f}".format(time.time()).replace(".", "")
    return int(digits[:length])
d9658e8fd93cceef5345fdaa1a7a6dffbd40d454
685,006
def construct_user_data(user=None):
    """Return dict with user data

    The returned keys are the bare minimum: username, first_name,
    last_name and email. No permissions or is_superuser flags!
    """
    fields = ("username", "first_name", "last_name", "email")
    return {field: getattr(user, field) for field in fields}
62c409750c596aa07ee3fbd3003d2e7646deb84b
685,011
def get_books_by_author(args, books):
    """
    Get books whose author name contains the arguments

    :param args: args object containing all arguments
    :param books: A list of book objects read from csv file
    :return: A dictionary with matched authors' names as key and a list of
        their book objects as value, or None when no author filter is given.
    """
    if not args.author:
        return None
    matches = {}
    # First pass: register every author whose name matches a search term.
    for term in args.author:
        needle = term.lower()
        for book in books:
            if needle in book.author.lower():
                matches.setdefault(book.author, [])
    # Second pass: collect all books written by the matched authors.
    for book in books:
        if book.author in matches:
            matches[book.author].append(book)
    return matches
462004ebf3bd5979a43c126f7871364e715b18f1
685,016
def parse_clusters(cluster_file):
    """
    Map each gene to its cluster representative.

    Expects one line per cluster, tab separated; the first entry of each
    line is the representative:

    cluster_1_rep member_1_1 member_1_2 ...
    cluster_2_rep member_2_1 member_2_2 ...
    """
    cluster_dict = {}
    with open(cluster_file) as handle:
        for line in handle:
            members = line.strip().split("\t")
            representative = members[0]
            # The representative maps to itself as well.
            cluster_dict.update((gene, representative) for gene in members)
    return cluster_dict
88243b7a142224e755eb2046c11a90b352d5835d
685,017
def _new_array(ctype, size): """Create an ctypes.Array object given the ctype and the size of array.""" return (size * ctype)()
4cd327f800cd2f296ce7881b4d6545edd5fffc27
685,019
import json


def to_json(**kwargs):
    """Convert input arguments to a formatted JSON string as expected by
    the EE API.
    """
    payload = json.dumps(kwargs)
    return {'jsonRequest': payload}
28142eacc3617d1f9d793e0d6b813855b7284c2d
685,021
def _masked_loss(loss, mask, eps=1e-8): """ Average the loss only for the visible area (1: visible, 0: occluded) """ return (loss * mask).sum() / (mask.sum() + eps)
98159a19e560f62c702ad2f5ab6c632c68d94c84
685,023
def _roundn(num, n): """Round to the nearest multiple of n greater than or equal to the given number. EMF records are required to be aligned to n byte boundaries.""" return ((num + n - 1) // n) * n
908617c1422e9c7b47cfe473a048bf31c8fed024
685,025
import re def _split_by_punctuation(chunks, puncs): """Splits text by various punctionations e.g. hello, world => [hello, world] Arguments: chunks (list or str): text (str) to split puncs (list): list of punctuations used to split text Returns: list: list with split text """ if isinstance(chunks, str): out = [chunks] else: out = chunks for punc in puncs: splits = [] for t in out: # Split text by punctuation, but not embedded punctuation. E.g. # Split: "Short sentence. Longer sentence." # But not at: "I.B.M." or "3.424", "3,424" or "what's-his-name." splits += re.split(r'(?<!\.\S)' + punc + r'\s', t) out = splits return [t.strip() for t in out]
ee05fab4ea30ab20da65c68bd2d68cd0d2a5ae85
685,027
def filename_from_path(full_path):
    """Return the file name (with its extension) from a path.

    Handles both '/' and '\\' separators. Fix: the original returned None
    for a bare file name containing no separator; now the input itself is
    returned in that case.
    """
    # rfind returns -1 when absent, so a separator-free path yields
    # full_path[0:] == full_path.
    cut = max(full_path.rfind("/"), full_path.rfind("\\"))
    return full_path[cut + 1:]
ee06d5fbb9b8cd6631295d9920e2c131e336d710
685,028
def type_or_none(default_type):
    """
    Convert the string 'None' to the value `None`.

    >>> f = type_or_none(int)
    >>> f(None) is None
    True
    >>> f('None') is None
    True
    >>> f(123)
    123
    """
    def convert(value):
        if value is None or value == 'None':
            return None
        return default_type(value)

    return convert
151a972c040581d94ac56f944d18a9a262343c89
685,031
import random
import string


def random_string() -> str:
    """Generate a random alphanumeric string of length 5 to 10."""
    alphabet = string.ascii_letters + string.digits
    length = random.randint(5, 10)
    return ''.join(random.choices(alphabet, k=length))
a0323d92764096f22033b11d842ae95c59ca07a5
685,032
def get_dev_raw_data_source(pipeline_builder, raw_data, data_format='JSON', stop_after_first_batch=False):
    """Add a 'Dev Raw Data Source' stage to *pipeline_builder*, configure its
    raw_data, data_format and stop_after_first_batch properties, and return
    the added stage."""
    stage = pipeline_builder.add_stage('Dev Raw Data Source')
    stage.data_format = data_format
    stage.raw_data = raw_data
    stage.stop_after_first_batch = stop_after_first_batch
    return stage
4ce08a20bb1b40b666ffdee519c04b358b1f4632
685,039
import colorsys
import random


def random_colors(N, bright=False):
    """
    Generate N random colors.

    To get visually distinct colors, they are generated evenly spaced in
    HSV space, converted to RGB, then shuffled.
    """
    value = 1.0 if bright else 0.7
    colors = [colorsys.hsv_to_rgb(i / N, 1, value) for i in range(N)]
    random.shuffle(colors)
    return colors
3be6bdea2f2b3b3aab8a934aea369250eaaab1ae
685,040
def clean(s):
    """
    Attempt to render the (possibly extracted) string as legible as
    possible: strip, replace newlines and special spaces with plain
    spaces, and collapse runs of spaces to one.

    Fix: the collapse loop replaced a single space with a single space,
    which never terminates on any string containing a space; it now
    replaces double spaces with single ones until none remain.
    """
    result = s.strip().replace("\n", " ")
    result = result.replace("\u00a0", " ")  # no-break space
    result = result.replace("\u000c", " ")  # vertical tab
    while "  " in result:
        result = result.replace("  ", " ")
    return result
e831dc186517ed9e2a46a03b675a71ff73c0686f
685,044
def compute_Obj(Y, A, K):  # main objective
    """
    Objective over columns j = 2..K of Y. Note: the numerators
    (Y_{:,j})^T A (Y_{:,j}) and denominators (Y_{:,j})^T (Y_{:,j}) are each
    summed separately, and one ratio of the two sums is returned.
    """
    numerator = 0
    denominator = 0
    for j in range(1, K):
        col = Y[:, j]
        numerator += col.T.dot(A.dot(col))
        denominator += col.T @ col
    return numerator / denominator
087c10c6a4585d8f58e48c3f21dc75afe1ebda2a
685,045
def indexOfLargestInt(listOfInts):
    """
    Return the index of the largest element of a non-empty list of ints,
    or False if the parameter is not a list consisting only of ints (or is
    empty). When the largest value occurs more than once, the lowest such
    index is returned.

    >>> indexOfLargestInt([])
    False
    >>> indexOfLargestInt('foo')
    False
    >>> indexOfLargestInt([3,5,4.5,6])
    False
    >>> indexOfLargestInt([40])
    0
    >>> indexOfLargestInt([-90,40,70,80,20])
    3
    >>> indexOfLargestInt([10,30,50,20,50])
    2
    """
    # Exact list type required (matches original behaviour); empty fails.
    if type(listOfInts) != list or listOfInts == []:
        return False
    best = 0
    for idx, item in enumerate(listOfInts):
        # Exact int type required: floats (and bool-free ints only) allowed.
        if type(item) != int:
            return False
        # Strict '>' keeps the first occurrence of the maximum.
        if item > listOfInts[best]:
            best = idx
    return best
d3beea294099cfb13cfcf814612db0d4fc06b3f0
685,046
from typing import Optional
from pathlib import Path


def relative_example_path(example_id: str, data_name: Optional[str] = None):
    """
    Returns the relative path from a train or test directory to the data
    file for the given example_id and data_name. The first three characters
    of the id form the directory fan-out.
    """
    stem = f"{data_name}_{example_id}" if data_name else example_id
    first, second, third = example_id[0], example_id[1], example_id[2]
    return Path(first) / second / third / f"{stem}.npy"
7a9d1c70d459496bca1994ee5360ceef09741d20
685,047
def rshift(val, n):
    """
    Logical (unsigned, 32-bit) right shift — Python equivalent of
    TypeScript's >>> operator.
    @see https://stackoverflow.com/questions/5832982/how-to-get-the-logical-right-binary-shift-in-python
    """
    unsigned = val % 0x100000000
    return unsigned >> n
5bdaf75adc159843836acd8850ca9f40a023b386
685,048
def is_sale(this_line):
    """Determine whether a given line describes a sale of cattle:
    it must contain a '$' and more than three whitespace-separated words."""
    wordy_enough = len(this_line.split()) > 3
    return '$' in this_line and wordy_enough
ee8e88fb6f79b11579534f815d54a6c401859f00
685,049
import itertools


def sparse_dict_from_array(array, magnitude_threshold=0):
    """Convert an array to a dict of above-threshold entries keyed by index-tuple."""
    result = {}
    for index_tuple in itertools.product(*map(range, array.shape)):
        value = array[index_tuple]
        if abs(value) > magnitude_threshold:
            result[index_tuple] = value
    return result
26774934744c927f1625caedffbeb17203af68ea
685,051
from typing import Literal
import unicodedata


def unicode(text: str, *, form: Literal["NFC", "NFD", "NFKC", "NFKD"] = "NFC") -> str:
    """
    Normalize unicode characters in ``text`` into canonical forms.

    Args:
        text
        form: Form of normalization applied to unicode characters. For
            example, an "e" with acute accent can be written as a combining
            sequence (canonical decomposition, "NFD") or as a single code
            point (canonical composition, "NFC"). NFC normalization is
            usually a safe bet; the "K" variants apply compatibility
            mappings that may change characters' meanings.

    See Also:
        https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize
    """
    return unicodedata.normalize(form, text)
d854dc73c78bd37064557a496bd8704ffc9a7a34
685,055
def without_duplicates(args):
    """
    Removes duplicated items from an iterable.

    :param args: the iterable to remove duplicates from
    :type args: iterable
    :return: the same iterable type without duplicated items (order is not
        guaranteed, since a set is used internally)
    :rtype: iterable
    :raise TypeError: if *args* is not iterable (strings are rejected)
    """
    if not hasattr(args, '__iter__') or isinstance(args, str):
        raise TypeError("Expected iterable, got {args_type} insted".format(args_type=type(args)))
    if not args:
        return args
    return type(args)(set(args))
02eed8fbe9138ffc4483170571562dfa29ae210e
685,057
def create_vulnerability_dictionary(qid, title, ip, name, category, severity, solution, diagnosis, consequence):
    """
    Creates a vulnerability dictionary.

    :param qid: integer Qualys ID of the vulnerability.
    :param title: string, title of the vulnerability.
    :param ip: IP address affected by the vulnerability.
    :param name: hostname associated to the IP.
    :param category: string, category of vulnerability.
    :param severity: integer, severity level of the vulnerability.
    :param solution: string, how to fix the vulnerability.
    :param diagnosis: string, how the vulnerability was detected.
    :param consequence: string, consequences of the vulnerability.
    :return: vulnerability dictionary with the entered values.
    """
    host_entry = {'ip': ip, 'name': name}
    return dict(
        qid=qid,
        title=title,
        hosts=[host_entry],
        category=category,
        severity=severity,
        solution=solution,
        diagnosis=diagnosis,
        consequence=consequence,
    )
de5fb8a917d5aa22e1060bd987361062afbefa9c
685,060
def get_page_text(soup):
    """Return all paragraph text of a webpage joined into a single
    newline-separated string; '' when *soup* is None."""
    if soup is None:
        return ''
    return '\n'.join(para.text for para in soup.select('p'))
da9de4683fb6611d3bfc703fe491a2b7139252a3
685,065
import pickle


def read_pickle(file_name):
    """
    Reads a data dictionary from a pickled file.

    Helper function for curve and surface ``load`` method.

    :param file_name: name of the file to be loaded
    :type file_name: str
    :return: data dictionary
    :rtype: dict
    """
    try:
        with open(file_name, 'rb') as fp:
            return pickle.load(fp)
    except IOError:
        # Re-raise with a friendlier message when the file can't be opened.
        raise IOError("File " + str(file_name) + " cannot be opened for reading.")
b1406ed56c2294a691d7958c63cd2fd35095fc3a
685,066
def get_appliance_dns(
    self,
    ne_id: str,
    cached: bool,
) -> dict:
    """Get DNS server IP addresses and domain configurations from an Edge
    Connect appliance via ``GET /resolver/{neId}?cached={cached}``
    (Swagger section: dns).

    :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
    :type ne_id: str
    :param cached: ``True`` retrieves last known value to Orchestrator,
        ``False`` retrieves values directly from Appliance
    :type cached: bool
    :return: dictionary of appliance DNS configuration containing
        ``domain_search`` (up to three search domains keyed "1"-"3", each
        with ``self`` and ``domainname``) and ``nameserver`` (up to three
        servers keyed "1"-"3", each with ``self``, ``address``, ``srcinf``
        and ``vrf_id``)
    :rtype: dict
    """
    endpoint = f"/resolver/{ne_id}?cached={cached}"
    return self._get(endpoint)
4b401effd8e4e5b89be75128cdeacda9ed852975
685,068
from pathlib import Path


def get_filesize(pathname: Path) -> int:
    """
    Return the size of the file at *pathname* in bytes.

    Parameters
    ----------
    pathname : Path

    Returns
    -------
    int
    """
    return pathname.stat().st_size
cec1f8b64efbf2bf58a4f1798b789ebb87251e40
685,072
import hashlib


def SimpleMerkleRoot(hashes, hash_function=hashlib.sha256):
    """
    Return the "Simple" Merkle Root Hash as a byte blob from an iterable
    ordered list of byte blobs containing the leaf node hashes.

    Pairs of consecutive hashes are hashed together level by level until a
    single hash remains; an odd trailing element is carried up unhashed
    (copied, not hashed). An empty input is treated as one leaf: the hash
    of empty data. Implemented iteratively (the original recursed).
    """
    def digest(blob):
        return hash_function(blob).digest()

    level = list(hashes) if hashes else [digest(bytes())]
    while len(level) > 1:
        parents = []
        # Hash adjacent pairs; range stops before a dangling last element.
        for i in range(0, len(level) - 1, 2):
            parents.append(digest(level[i] + level[i + 1]))
        if len(level) % 2:
            parents.append(level[-1])
        level = parents
    return level[0]
d9e7bef38e332f674301b49a5d8f2af98bcd7c68
685,073
def round_robin_list(num, to_distribute):
    """Return a list of 'num' elements from 'to_distribute' that are evenly
    distributed

    Args:
        num: number of elements in requested list
        to_distribute: list of element to be put in the requested list

    >>> round_robin_list(5, ['first', 'second'])
    ['first', 'second', 'first', 'second', 'first']
    >>> round_robin_list(3, ['first', 'second'])
    ['first', 'second', 'first']
    >>> round_robin_list(4, ['first', 'second', 'third'])
    ['first', 'second', 'third', 'first']
    >>> round_robin_list(1, ['first', 'second'])
    ['first']
    """
    if not to_distribute:
        return []
    full_cycles, extra = divmod(num, len(to_distribute))
    return to_distribute * full_cycles + to_distribute[:extra]
69837d38dd131658280d5fee21524a090044990d
685,074
def inline(sconf):
    """
    Return config in inline form, opposite of :meth:`config.expand`.

    Single-element ``shell_command`` / ``shell_command_before`` lists are
    collapsed to plain strings, a one-key config collapses to its
    shell_command string, and ``windows`` / ``panes`` are inlined
    recursively.

    Parameters
    ----------
    sconf : dict

    Returns
    -------
    dict
        configuration with optional inlined configs.
    """
    def _single_element_list(conf, key):
        # True when conf[key] exists and is a one-element list.
        return key in conf and isinstance(conf[key], list) and len(conf[key]) == 1

    if _single_element_list(sconf, 'shell_command'):
        sconf['shell_command'] = sconf['shell_command'][0]
    # Independent of the collapse above (mirrors the original flow).
    if len(sconf.keys()) == 1:
        sconf = sconf['shell_command']
    if _single_element_list(sconf, 'shell_command_before'):
        sconf['shell_command_before'] = sconf['shell_command_before'][0]

    # recurse into window and pane config items
    if 'windows' in sconf:
        sconf['windows'] = [inline(window) for window in sconf['windows']]
    if 'panes' in sconf:
        sconf['panes'] = [inline(pane) for pane in sconf['panes']]

    return sconf
6498d6711248f38032bcbdf1e694858542984704
685,081
import hashlib


def md5sum(data):
    """
    Return md5sum of data as a 32-character string.

    Fix: ``hashlib.md5`` requires a bytes-like object in Python 3, so str
    input is now encoded as UTF-8 first — without this, the documented
    examples below raise TypeError.

    >>> md5sum('random text')
    'd9b9bec3f4cc5482e7c5ef43143e563a'
    >>> md5sum(u'random text')
    'd9b9bec3f4cc5482e7c5ef43143e563a'
    >>> len(md5sum('random text'))
    32
    """
    if isinstance(data, str):
        data = data.encode('utf-8')
    return hashlib.md5(data).hexdigest()
b99b07105596289fd29335bbb0e2f9dc91e4e253
685,082
def parse_team_stats(stat_obj: dict) -> str:
    """
    Turn an individual team's stats dict (e.g. {"GA": "25", "GF": "27", ...}
    as stored in Dynamo) into a human-readable, tab-separated line.

    :param stat_obj: dict containing a team's stats
    :return: strified version of this data
    """
    parts = [f"{stat}: {val}\t" for stat, val in stat_obj.items()]
    return "".join(parts) + "\n"
7ee324c0bcdda2903be2fd82679dae7c7d46ed75
685,084
from typing import List


def is_asset_blacklisted(name: str, blacklist: List[str]) -> bool:
    """
    Check whether an asset must be filtered because its name ends with any
    blacklisted suffix.

    :param name
    :param blacklist
    :returns bool
    """
    # str.endswith accepts a tuple of suffixes; empty tuple yields False.
    return name.endswith(tuple(blacklist))
4bd86742bd68d3a603caa64d5672b4f84d9c4d08
685,085
def get_body_length(htc_struc, body):
    """Get the length of a body from an htc structure, given its string name.

    The length is taken as the absolute value of the second-to-last entry
    of the last c2_def section's values.
    # NOTE(review): assumes values[-2] is the spanwise coordinate — confirm
    # against the htc file format.
    """
    contents = htc_struc.get_subsection_by_name(body).c2_def.contents
    final_key = next(reversed(contents))
    return abs(contents[final_key].values[-2])
b2f6df3659d359974f982c8a56f8a3c70ed584da
685,087
def get_tcp_udp_port_data(self) -> dict:
    """Retrieve TCP/UDP port-to-application mappings from the SP portal.

    Issues ``GET /spPortal/tcpUdpPorts`` (Swagger section ``spPortal``).

    :return: dict keyed by port number; each value holds ``tcp`` and ``udp``
        lists of ``{"name": ..., "description": ...}`` protocol entries.
    :rtype: dict
    """
    endpoint = "/spPortal/tcpUdpPorts"
    return self._get(endpoint)
5120bc8189cc903ac00267841754603aeb1013ee
685,088
def _RemoteSteps(api, app_engine_sdk_path, platform): """Runs the build steps specified in catapult_build/build_steps.py. Steps are specified in catapult repo in order to avoid multi-sided patches when updating tests and adding/moving directories. This step uses the generator_script; see documentation at github.com/luci/recipes-py/blob/master/recipe_modules/generator_script/api.py Use the test_checkout_path property in local tests to run against a local copy of catapult_build/build_steps.py. """ base = api.properties.get('test_checkout_path', str(api.path['checkout'])) script = api.path.join(base, 'catapult_build', 'build_steps.py') args = [ script, '--api-path-checkout', api.path['checkout'], '--app-engine-sdk-pythonpath', app_engine_sdk_path, '--platform', platform or api.platform.name, ] return api.generator_script(*args)
de6ee15783ca75582e57e55dea58c4a9c3706870
685,091
def ce(actual, predicted):
    """
    Computes the classification error.

    This function computes the classification error between two lists

    :param actual : int, float, list of numbers, numpy array
                    The ground truth value
    :param predicted : same type as actual
                    The predicted value
    :returns double
              The classification error between actual and predicted
    """
    mismatches = sum(1 for truth, guess in zip(actual, predicted) if truth != guess)
    return mismatches / len(actual)
10be7d124c56e888c674112e768050c2f9bbd5f6
685,092
async def hello():
    """
    Returns "hello from service1"
    """
    greeting = 'hello from service1'
    return greeting
95801d51a4a4cbdf2a5535f009be54b271f29091
685,097
def parsePoint(storedPt):
    """
    Translates a string of the form "{1, 0}" into a float tuple (1.0, 0.0)
    """
    coords = storedPt.strip("{}").split(",")
    return tuple(map(float, coords))
e9d7f88c866fe0f504119258488df90b9e56024f
685,098
def spatial_overlap_conv_3x3_stride_2(p):
    """
    Compute the spatial overlap of a 3x3 convolutional layer with stride 2,
    expressed in terms of the input feature map's spatial overlap value (p).
    """
    q = 1 - p
    numerator = 1 + 2 * q
    denominator = 1 + 4 * q * (1 + q)  # (2 - p) == 1 + (1 - p)
    return numerator / denominator
5874bc24f0fefc771021268bcd83425c3298b822
685,100
def join(*args):
    """
    Return a string composed of the result of applying str() to each one of
    the given args, separated by spaces, but only for items that are truthy.

    Example:
        >>> join('Hello', False, 1, True, 0, 'world')
        'Hello 1 True world'
    """
    return ' '.join(str(item) for item in args if item)
ec9236d51d68717b5de26fad9c5a74871e106270
685,101
from typing import List
from typing import Dict


def reduce_raw_dict(data: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """
    Initial data cleanup.

    Keeps only log entries that reference a "BernsteinConference" page AND
    contain "Completed" (dropping "Started" duplicates) AND do not mention
    "/plugins" (dropping PDF-viewer load logs).

    :param data: list containing docker log dictionaries.
    :return: list containing docker log dictionaries.
    """
    return [
        entry
        for entry in data
        if "BernsteinConference" in entry["log"]
        and "Completed" in entry["log"]
        and "/plugins" not in entry["log"]
    ]
7595977c3d5edbd70059c916eb631daad031a866
685,102
def build(name, builder):
    """Wrapper to turn (name, vm) -> val method signatures into (vm) -> val."""
    def bound(vm):
        return builder(name, vm)
    return bound
61a87daca6702c84e3f790c6042f82059ceead17
685,103
def PrepareCorrResults(df):
    """
    Given a dataframe, return the pairwise Pearson correlation matrix of all
    its variables as a dataframe whose first column, 'Variable', holds the
    variable names (so the first data columns can be read as the class
    variable's correlations against all others).
    """
    corr_frame = df.corr().reset_index(level=0)
    return corr_frame.rename(columns={'index': 'Variable'})
fb9a8969e95f614e2c7e5603abfbbe3a87bf2b9f
685,105
def full_to_type_name(full_resource_name):
    """Creates a type/name format from full resource name."""
    parts = full_resource_name.split('/')
    return '/'.join(parts[-2:])
9bcb3985be683060e0b4028183c9da750f4c619c
685,108
def is_shuffle(stage: str) -> bool:
    """Whether to shuffle input for a given stage.

    Args:
        stage: One of 'train', 'val', 'test'.

    Returns:
        True only for the training stage (raises KeyError for unknown stages).
    """
    shuffle_by_stage = {'train': True, 'val': False, 'test': False}
    return shuffle_by_stage[stage]
7529c23cc1170cb36af57e9cc7473005cda78366
685,109
def _get_item_by_name(cls, name): """ Gets the item from the corresponding class with the indicated name. :param cls: Class / Entity of the item. :param name: Case-insensitive name of the item. :return: The item if there is a match, None otherwise. """ if name: try: return cls.get(cls.name == name.lower()) except cls.DoesNotExist: return None return None
9e8f3b32f774ded572d1b49aee8f73de82f03e9c
685,113
import six


def intersect(*masks):
    """Return new masks that are the per-layer intersection of the provided masks.

    Args:
        *masks: The set of masks to intersect (dicts mapping layer -> values).

    Returns:
        The intersection of the provided masks. Input masks are not modified.
    """
    result = {}
    for mask in masks:
        for layer, values in mask.items():
            if layer in result:
                # Do NOT use `*=`: for numpy arrays that multiplies in place
                # and would corrupt the array borrowed from the first mask
                # that contributed this layer.
                result[layer] = result[layer] * values
            else:
                result[layer] = values
    return result
db883859a72299a9dea2188dd27b7b62c3d229ec
685,115
def get_label_dummy_goal(vertex_input):
    """Get the index of the fictitious dummy goal (last vertex + 1)"""
    if isinstance(vertex_input, int):
        return vertex_input + 1
    if isinstance(vertex_input, list):
        return vertex_input[-1] + 1
    print('Wrong type of input, accepts integer or list')
    return None
5a9a9cf8283580d50f7cf2f5a2d5e3ed1667da7f
685,117
from typing import Any


def is_not(a: Any, b: Any, /) -> bool:
    """Check if the arguments are different objects."""
    # `is not` is the idiomatic identity test; comparing id() values is
    # equivalent but allocates two temporary ints and obscures the intent.
    return a is not b
11927ceac6ef3e2885b2825223257cf6c40da8c3
685,118
import torch


def clip_gradient(model, clip_norm=10):
    """Clip the gradient of each parameter of ``model`` by norm, in place.

    :param model: module whose parameter gradients are clipped
    :param clip_norm: maximum allowed norm per parameter
    :return: the same model (gradients modified in place)
    """
    for param in model.parameters():
        # clip_grad_norm (no underscore) was deprecated and later removed;
        # clip_grad_norm_ is the supported in-place variant.
        torch.nn.utils.clip_grad_norm_(param, clip_norm)
    return model
d7b4ad07acb9625910782481cbaebb8a4d81954a
685,119
import random


def split_partition(annotations):
    """Randomly split annotations into train (80%), dev (10%), and test (10%) sets."""
    doc_names = list(annotations.keys())
    random.seed(100)  # fixed seed so the shuffle is reproducible
    random.shuffle(doc_names)

    cut_train = int(0.8 * len(doc_names))
    cut_dev = int(0.9 * len(doc_names))
    partitions = (
        doc_names[:cut_train],
        doc_names[cut_train:cut_dev],
        doc_names[cut_dev:],
    )
    train, dev, test = (
        {doc: annotations[doc] for doc in names} for names in partitions
    )
    return train, dev, test
752078e8fd4121db702f0a79d8995f7c27446568
685,122
import math


def conttogrowth(contrate):
    """Convert a continuously-compounded rate to the equivalent annual growth factor."""
    growth = math.exp(contrate)
    return growth
b3b3c9a7a7ee7d16a19bb0b17294a4dab0012297
685,124
def Backtracking_linesearch(f, x, lambda_newton, Delta_x, options):
    """
    Backtracking line search for a damped Newton step.

    Starting from t = 1, the stepsize is shrunk by the factor beta until the
    Armijo condition

        f(x + t*Delta_x) < f(x) - alpha * t * lambda_newton**2

    holds. If max_iter trials are exhausted, t = 0 is returned so the Newton
    update is skipped entirely (keeps iterates inside the feasible region,
    e.g. the psd cone).

    :param f: objective function handle; evaluated as f(x) and f(x + t*Delta_x)
    :param x: current evaluation point (e.g. matrix [n_exp, n_exp])
    :param lambda_newton: Newton decrement (positive scalar)
    :param Delta_x: descent direction; the damped update is x + t*Delta_x
    :param options: tuple (alpha, beta, max_iter)
    :return: stepsize t in [0, 1]
    """
    alpha, beta, max_iter = options

    step = 1
    f_current = f(x)
    # Positive shortfall means the Armijo condition is still violated.
    shortfall = f(x + step * Delta_x) - (f_current - alpha * step * lambda_newton ** 2)

    trials = 1
    while shortfall > 0 and trials < max_iter:
        step = beta * step
        shortfall = f(x + step * Delta_x) - (f_current - alpha * step * lambda_newton ** 2)
        trials = trials + 1

    if trials == max_iter:
        # Give up: signal the caller to skip the update.
        step = 0

    return step
be0761ed8ee59e635ce17db2b952aa2faa4e64ba
685,126
def ajoute_virgule(valeur1):
    """Append a decimal separator (".") if the string does not already contain one."""
    if "." in valeur1:
        return valeur1
    return valeur1 + "."
f4533c8d59c535c8c4a1fa9f69b3891f0412725e
685,133
def make_profile_mask(ref_2d_profile, threshold=1e-3):
    """Build a mask of the trace based on the 2D profile reference file.

    :param ref_2d_profile: the 2d trace profile reference.
    :param threshold: profile values at or below this are excluded.

    :type ref_2d_profile: array[float]
    :type threshold: float

    :returns: boolean mask, True where the profile exceeds the threshold.
    :rtype: array[bool]
    """
    return ref_2d_profile > threshold
61d8dc14a2f5bf8c7fdeccc101aacca18b90e09e
685,137
import random def initialize_hetero_vector(class_, limits): """Initializes a heterogeneous vector of type `class_` based on the values in `limits`. :param class_: the class into which the final list will be typecasted into. :param limits: a list that determines whether an element of the individual is a bool, int or float. It also provides lower and upper limits for the int and float elements. :returns: a heterogeneous vector of type `class_`. Furthermore, it assumes that `limits` is a list and it elements have the folowing format: `['float', min_flt, max_flt]` for float values; `['int', min_int, max_int]` for int values; and `'bool'` for boolean values. """ x = [] for i in range(0, len(limits)): if limits[i] == 'bool': x += [bool(random.getrandbits(1))] else: if type(limits[i][0]) == float: x += [random.uniform(limits[i][0], limits[i][1])] if type(limits[i][0]) == int: x += [random.randint(limits[i][0], limits[i][1])] return class_(x)
5cbdb42852b68b6b22c5fd12493c526d68372c65
685,138
def kpt_flip(kpts, img_shape, flip_pairs, direction):
    """Flip keypoints horizontally.

    Args:
        kpts (Tensor): Shape (..., 2)
        img_shape (tuple): Image shape (height, width).
        flip_pairs (list): Pairs of keypoint indices to swap after mirroring
            (e.g. left/right joints).
        direction (str): Flip direction; only "horizontal" is supported.

    Returns:
        Tensor: Flipped keypoints (input is left unmodified).
    """
    assert kpts.shape[-1] % 2 == 0
    assert direction == 'horizontal'
    mirrored = kpts.clone()
    # Mirror x coordinates across the image width.
    mirrored[..., 0] = img_shape[1] - mirrored[..., 0]
    # Swap paired keypoints (e.g. left <-> right).
    for pair in flip_pairs:
        swapped = pair[::-1]
        mirrored[:, pair, :] = mirrored[:, swapped, :]
    return mirrored
33159bb7d2fda04bfb29d7a7deef335fef2bd0e2
685,142
import struct def _StToNum(S): """ Convert S to a number. :param S: The (big-endian) bytestring to convert to an integer :type S: bytes :return: An integer representation of the bytestring (rightmost chr == LSB) :rtype: int """ return struct.unpack('>L', S)[0]
34fa90e7ef9cef9c46d9a423f85e517f5436fb58
685,144
def cleanp(stx):
    """Simple string cleaner: strip parentheses and commas in one pass."""
    return stx.translate(str.maketrans('', '', '(),'))
2b5407d9c03371d031410df74136c3783ee3babe
685,145
def mergeRegions(regions):
    """
    Given a list of [(chrom, start, end), ...], merge all overlapping regions.

    Returns a dict mapping chrom -> sorted list of merged [start, end] pairs.
    Regions on the same chromosome whose start is <= the current end
    (overlapping or book-ended) are fused.

    Fixes two defects of the previous version: an empty input now returns {}
    (it used to produce a bogus {None: [[None, None]]} entry), and falsy
    chromosome names (e.g. '' or 0) are no longer silently dropped by a
    truthiness check.
    """
    out = {}
    last = None  # currently-open region: [chrom, start, end]

    def _flush(region):
        # Commit the open region to the output dict.
        out.setdefault(region[0], []).append([region[1], region[2]])

    for reg in sorted(regions):
        if last is not None and reg[0] == last[0] and reg[1] <= last[2]:
            # Overlaps/abuts the open region on the same chromosome: extend.
            last[2] = max(last[2], reg[2])
            continue
        if last is not None:
            _flush(last)
        last = [reg[0], reg[1], reg[2]]
    if last is not None:
        _flush(last)
    return out
2515881e7f4755e763a9befc8d9c8e1222ce18c5
685,146
def parse_notifier_name(name):
    """Convert the name argument to a list of names.

    Examples
    --------
    >>> parse_notifier_name('a')
    ['a']
    >>> parse_notifier_name(['a','b'])
    ['a', 'b']
    >>> parse_notifier_name(None)
    ['anytrait']
    """
    if name is None:
        return ['anytrait']
    if isinstance(name, str):
        return [name]
    if isinstance(name, (list, tuple)):
        for entry in name:
            assert isinstance(entry, str), "names must be strings"
        return name
487e2a35fdf3777d2719bfdb09664e85668ab673
685,147
def sqrt_expansion(ceil):
    """
    Count the fractions among the first `ceil` expansions of sqrt(2) whose
    numerator has more digits than the denominator.
    """
    num, den = 1, 1
    count = 0
    for _ in range(ceil):
        # next convergent: (n + 2d) / (n + d)
        num, den = num + 2 * den, num + den
        if len(str(num)) > len(str(den)):
            count += 1
    return count
74895c988fedce8de3160e3d4219f27668ff9ac7
685,151
def _prune_arg(l, key, extra=0): """Removes list entry "key" and "extra" additional entries, if present. Args: l (list): The list to prune. key (object): The list entry to identify and remove. extra (int): Additional entries after key to prune, if found. """ try: idx = l.index(key) args = l[idx:idx+extra+1] del(l[idx:idx+extra+1]) return args except ValueError: return None
2a8f6826e12607e4f5192b8b3129fbb4512ad684
685,153
from typing import Set


def get_genes_in_overlap(genes, overlap_list) -> Set[str]:
    """Select members from a gene list which are also in a given overlap
    (second list of genes).

    Parameters
    ----------
    genes : list
        ordered list of genes from which to select
    overlap_list : list
        list of genes that selected genes must belong to

    Returns
    -------
    set of genes that are also present in the overlap list
    """
    # Convert once to a set: O(1) membership per gene instead of a linear
    # scan of overlap_list for every gene.
    overlap = set(overlap_list)
    return {gene for gene in genes if gene in overlap}
f3d34d39798d1f538f4b3e19ddee0eaf011b5603
685,154
def _get_y_coord(tile): """ Return y coordinate of square """ return tile[1]
d5d6af47095c076f826ca982d59506e5d96c0eb7
685,157
def list2dict(l=['A', 'B', 'C'], keylist=['a', 'b', 'c']):
    """Map each key in keylist to the value at the same position in l.

    (tasks 0.5.31 and 0.6.3)

    >>> list2dict(['A', 'B', 'C'], ['a', 'b', 'c'])
    {'a': 'A', 'b': 'B', 'c': 'C'}
    """
    return {key: l[index] for index, key in enumerate(keylist)}
002fdf595629f79928d36615de7ea7bc32231148
685,158
def find_selfloop_nodes(G):
    """ Finds all nodes that have self-loops in the graph G. """
    # Comprehension instead of the manual append loop: same order, same
    # result, one pass over G.edges().
    return [u for u, v in G.edges() if u == v]
7c181e38067f0334649874b4bcc0a7e75c501a0b
685,161
def rerun_probability(n_runs):
    """
    Calculates a probability for running another episode with the same genome.
    """
    # Zero or negative run counts always rerun; otherwise decay as 1/n^2.
    return 1 if n_runs <= 0 else 1 / n_runs ** 2
163e6431b0a8b80893bd3771b32fc217c0598969
685,162