content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import torch


def poisson(datum: torch.Tensor, time: int, dt: float = 1.0, **kwargs) -> torch.Tensor:
    # language=rst
    """
    Generates Poisson-distributed spike trains based on input intensity. Inputs must be
    non-negative, and give the firing rate in Hz. Inter-spike intervals (ISIs) for
    non-negative data incremented by one to avoid zero intervals while maintaining ISI
    distributions.

    :param datum: Tensor of shape ``[n_1, ..., n_k]``.
    :param time: Length of Poisson spike train per input variable.
    :param dt: Simulation time step.
    :return: Tensor of shape ``[time, n_1, ..., n_k]`` of Poisson-distributed spikes.
    """
    assert (datum >= 0).all(), "Inputs must be non-negative"

    # Get shape and size of data; work on a flat view, reshape at the end.
    shape, size = datum.shape, datum.numel()
    datum = datum.flatten()
    time = int(time / dt)

    # Compute firing rates in seconds as function of data intensity,
    # accounting for simulation time step.
    # NOTE(review): the Poisson rate used below is 1/datum * (1000/dt), i.e. the
    # mean inter-spike interval in timesteps for a rate given in Hz; zero-valued
    # inputs keep rate 0 and therefore never spike.
    rate = torch.zeros(size)
    rate[datum != 0] = 1 / datum[datum != 0] * (1000 / dt)

    # Create Poisson distribution and sample inter-spike intervals
    # (incrementing by 1 to avoid zero intervals).
    dist = torch.distributions.Poisson(rate=rate)
    intervals = dist.sample(sample_shape=torch.Size([time + 1]))
    intervals[:, datum != 0] += (intervals[:, datum != 0] == 0).float()

    # Calculate spike times by cumulatively summing over time dimension.
    # Times that overflow the window are clamped to slot 0, which is discarded
    # below, so they simply never fire.
    times = torch.cumsum(intervals, dim=0).long()
    times[times >= time + 1] = 0

    # Create tensor of spikes; drop the dummy first timestep used as overflow sink.
    spikes = torch.zeros(time + 1, size).byte()
    spikes[times, torch.arange(size)] = 1
    spikes = spikes[1:]

    return spikes.view(time, *shape)
67d999f5d31b03f62eefc64f1af06b3e5f09a7cd
690,271
def parse_dimensions(dimensions):
    """
    Parse the width and height values from a dimension string.

    Valid values are '1x1', '1x', and 'x1'. If one of the dimensions is
    omitted, the parse result will be None for that value.

    :param dimensions: string of the form '<width>x<height>'
    :return: dict with 'width' and 'height' keys (int or None)
    """
    # Use a real conditional instead of the `d and int(d) or None` trick:
    # with that idiom a dimension of '0' was silently parsed as None,
    # because int('0') is falsy.
    width, height = [int(d) if d.strip() else None
                     for d in dimensions.split('x')]
    return dict(width=width, height=height)
69e02ab02dd0d37c4d72e591f40d91dff0d18de1
690,272
def is_subgraph(G, H):
    """
    Checks whether G is a subgraph of H, that is whether all the edges of G
    belong to H
    """
    g_edges = set(G.edges())
    h_edges = set(H.edges())
    # G is a subgraph exactly when no edge of G falls outside H.
    return g_edges.issubset(h_edges)
9d57238cbf0854ed1cb7c60afb7d6e008a927348
690,276
def ns_faint(item_name):
    """Prepends the faint xml-namespace to the item name."""
    namespace = '{http://www.code.google.com/p/faint-graphics-editor}'
    return namespace + item_name
d1e28638b1735b3f14752ba684528f134666dd77
690,279
def polygon(self, nvert="", x1="", y1="", x2="", y2="", x3="", y3="",
            x4="", y4="", **kwargs):
    """Creates annotation polygons (GUI).

    APDL Command: /POLYGON

    Parameters
    ----------
    nvert
        Number of vertices of polygon (3 NVERT 8). Use /PMORE for
        polygons with more than 4 vertices.

    x1, y1, ..., x4, y4
        X locations (-1.0 < X < 2.0) and Y locations (-1.0 < Y < 1.0)
        for vertices 1 through 4 of the polygon.

    Notes
    -----
    Creates annotation polygons to be written directly onto the display at a
    specified location. This is a command generated by the Graphical User
    Interface (GUI) and will appear in the log file (Jobname.LOG) if
    annotation is used. This command is not intended to be typed in directly
    in an ANSYS session (although it can be included in an input file for
    batch input or for use with the /INPUT command).

    All polygons are shown on subsequent displays unless the annotation is
    turned off or deleted. Use the /LSPEC and the /PSPEC command to set the
    attributes of the polygon. Use the /PMORE command to define the 5th
    through 8th vertices of the polygon.

    This command is valid in any processor.
    """
    # Assemble the comma-separated APDL command; str() of each argument
    # matches the previous f-string interpolation exactly.
    fields = (nvert, x1, y1, x2, y2, x3, y3, x4, y4)
    command = "/POLYGON," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
9af0e648121c08d2438745eb18e345cf41ae36c7
690,282
def get_admin_site_name(context):
    """
    Get admin site name from context.

    First it tries to find variable named admin_site_name in context. If this
    variable is not available, admin site name is taken from request path
    (it is first part of path - between first and second slash).
    """
    name = context.get('admin_site_name', None)
    if name is not None:
        return name
    # Fall back to the first path segment of the current request.
    return context.get('request').path.split('/')[1]
c18da7e89ff6c6190d276956e7031cac74683b63
690,284
def to_iso_format(date_time):
    """
    Return a string representing the date and time in ISO 8601 format,
    YYYY-MM-DD HH:MM:SS.mmmmmm or, if microsecond is 0,
    YYYY-MM-DD HH:MM:SS

    If utcoffset() does not return None, a 6-character string is appended,
    giving the UTC offset in (signed) hours and minutes:
    YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM or, if microsecond is 0,
    YYYY-MM-DD HH:MM:SS+HH:MM
    """
    # A space separator instead of the default 'T'.
    return date_time.isoformat(" ")
41fcc983707874da2bac3407f5f3bfdb8e9807b8
690,287
def re_fun(number):
    """Recursively compute the factorial of ``number``.

    :param number: non-negative integer
    :return: number! (with 0! == 1)
    """
    # Base case covers 0 and 1: the original only stopped at 1 and recursed
    # forever (RecursionError) when given 0.
    if number <= 1:
        return 1
    return number * re_fun(number - 1)
a57c3a4f3d8609c4733004dee47e15e3dfbe45f1
690,289
def get_data_matching_filter(filter_by, raw_data):
    """
    Returns a list of data matching filter_by from a given STB's raw_data

    :param filter_by: Tuple containing (filter_name, filter_value)
    :param raw_data: Dictionary of raw stb_data for 1 STB.
    :return: List of dictionary entries from flattened STB matching filter.
    """
    # With no filter, match on the STB name itself so every entry passes.
    if filter_by:
        filter_name, filter_value = filter_by
    else:
        filter_name, filter_value = 'stb', raw_data['stb']

    result = []
    # Flatten the nested {date: {title: info}} structure, skipping the
    # top-level 'stb' key, and keep only entries matching the filter.
    for date in raw_data:
        if date == 'stb':
            continue
        for title, info in raw_data[date].items():
            entry = {
                'stb': raw_data['stb'],
                'date': date,
                'title': title,
                'provider': info['provider'],
                'rev': info['rev'],
                'time': info['time'],
            }
            if entry[filter_name] == filter_value:
                result.append(entry)
    return result
acce77c830371022be09ff0dc1337e419fd0818e
690,291
def is_float(arg):
    """ Returns True iff arg is a valid float

    :param arg: any value
    :return: True when arg is a number or a string parseable as a float
    """
    if isinstance(arg, (float, int)):
        return True
    try:
        float(arg)
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string inputs such as None or
        # lists, which the original version let propagate as an exception.
        return False
    return True
408ce0ef3e1d40c4e82808785368c621448a6001
690,293
def make_rest(step_size):
    """ this method constructs a placeholder for a rest note """
    placeholder = {"rest": step_size}
    return placeholder
c4ad4a70c1417c82decc554712652f225ff124af
690,294
def get_sessions_by_dimensions(
    service, profile_id, startDate, endDate, segment, dimensions, metrics="ga:sessions"
):
    """
    Query the API for channel grouping for sessions WITH dimensions,
    sorting the query output by the first dimension specified.

    Args:
        service: authenticated connection object
        profile_id: view in GA in the form 'ga:12345678'
        startDate: start of query as YYYY-MM-DD
        endDate: end of query as YYYY-MM-DD
        segment: Segment to apply. Leave blank if not required.
        dimensions: string of comma separated values.
        metrics: string of comma separated values. Leave blank for default.
    Returns:
        query output
    """
    # Sort on the first dimension only: truncate at the first comma when
    # several dimensions are given.
    sort = dimensions
    comma = sort.find(",")
    if comma > 0:
        sort = sort[:comma]

    # Build the request parameters once; the segment is optional.
    params = dict(
        ids=profile_id,
        start_date=startDate,
        end_date=endDate,
        metrics=metrics,
        dimensions=dimensions,
        sort=sort,
    )
    if segment:
        params["segment"] = segment  # with segment

    return service.data().ga().get(**params).execute()
c0099624b25e91f2d0d4f5363d55663309369bcd
690,295
def backpointer_cell_to_string(cell):
    """Makes a string of a cell of a backpointer chart"""
    body = "".join("(%i, %s)" % (k, ",".join(rhs)) for k, rhs in cell)
    return "[" + body + "]"
a3a416fd08ffdfb96419f6202b54e478052eb631
690,296
def cutStrAndAlignRight(s, length=8):
    """
    Cuts the string down to the specified length and if it's shorter then it
    aligns it to the right

    :param s: input string
    :param length: target field width (default 8)
    :return: string of exactly ``length`` characters
    """
    if len(s) >= length:
        # Truncate to the requested length; the original sliced a hard-coded
        # 8 characters regardless of the ``length`` argument.
        return s[:length]
    return ' ' * (length - len(s)) + s
01a83bb99cb8f9e1e23ea1470011f68bdd2738c0
690,297
def line_split_to_str_list(line):
    """E.g: 1 2 3 -> [1, 2, 3] Useful to convert numbers into a python list."""
    tokens = line.split()
    return "[{}]".format(", ".join(tokens))
3f06e0f03b22d4dd92939afe7a18d49a1bc236dc
690,299
from pathlib import Path


def single_duplicate_bond_index_v3000_sdf(tmp_path: Path) -> Path:
    """Write a single molecule to a v3000 sdf with a duplicate bond index.

    Args:
        tmp_path: pytest fixture for writing files to a temp directory

    Returns:
        Path to the sdf
    """
    # NOTE(review): the bond block below deliberately repeats index 1
    # (the "M V30 1 1 5 8" line after bond 7) instead of using index 8 —
    # that duplicate index is the malformed input this fixture exists to
    # provide; do not "fix" it.
    sdf_text = """
 0 0 0 0 0 999 V3000
M V30 BEGIN CTAB
M V30 COUNTS 9 9 0 0 0
M V30 BEGIN ATOM
M V30 1 C 87.71 -95.64 0 0
M V30 2 C 87.71 -81.29 0 0
M V30 3 C 100.18 -74.09 0 0
M V30 4 C 100.18 -59.69 0 0
M V30 5 C 87.71 -52.49 0 0
M V30 6 C 75.24 -59.69 0 0
M V30 7 C 75.24 -74.09 0 0
M V30 8 C 87.71 -38.09 0 0
M V30 9 O 100.18 -30.89 0 0
M V30 END ATOM
M V30 BEGIN BOND
M V30 1 1 1 2
M V30 2 1 2 3
M V30 3 2 3 4
M V30 4 1 4 5
M V30 5 2 5 6
M V30 6 1 6 7
M V30 7 2 7 2
M V30 1 1 5 8
M V30 9 2 8 9
M V30 END BOND
M V30 END CTAB
M END
$$$$
"""
    outpath = tmp_path / "input.sdf"
    with open(outpath, "w") as outh:
        outh.write(sdf_text)
    return outpath
b698fbef40ffc32edb7ee41f76d0c6d53e2ddb3c
690,300
def extract_github_owner_and_repo(github_page):
    """
    Extract only owner and repo name from GitHub page

    https://www.github.com/psf/requests -> psf/requests

    Args:
        github_page - a reference, e.g. a URL, to a GitHub repo

    Returns:
        str: owner and repo joined by a '/'
    """
    if github_page == "":
        return ""
    # Everything after "github.com", e.g. "/psf/requests/..."
    tail = github_page.split("github.com")[1]
    # The first two path segments are the owner and the repo name.
    return "/".join(tail.split("/")[1:3])
dbcee3c500d6650a1a48c372412ce5e37723b34b
690,301
from typing import Union


def invalid_output(
        query: dict, db_query: Union[str, dict], api_key: str, error: str,
        start_record: int, page_length: int) -> dict:
    """Create and return the output for a failed request.

    Args:
        query: The query in format as defined in wrapper/input_format.py.
        db_query: The query that was sent to the API in its language.
        api_key: The key used for the request.
        error: The error message returned.
        start_record: The index of the first record requested.
        page_length: The page length requested.

    Returns:
        A dict containing the passed values and "-1" as index where necessary
        to be compliant with wrapper/output_format.
    """
    # Build the whole structure as one literal; sentinel counts are strings.
    return {
        "query": query,
        "dbQuery": db_query,
        "apiKey": api_key,
        "error": error,
        "result": {
            "total": "-1",
            "start": str(start_record),
            "pageLength": str(page_length),
            "recordsDisplayed": "0",
        },
        "records": [],
    }
4a97a89f5ce7003d582b23b8e7ca036eff74a3b0
690,304
def time_mirror(clip):
    """
    Returns a clip that plays the current clip backwards.
    The clip must have its ``duration`` attribute set.
    The same effect is applied to the clip's audio and mask if any.
    """
    # Map output time t back to input time duration - t - 1, keeping the
    # original duration.
    reverse = lambda t: clip.duration - t - 1
    return clip.time_transform(reverse, keep_duration=True)
4f7283cf53090946ed41fc1c736a7c84f7cfba37
690,306
def get_smallest_divisible_number_brute_force(max_factor):
    """
    Get the smallest divisible number by all [1..max_factor] numbers
    by brute force.

    :param max_factor: largest factor that must divide the result
    :return: smallest positive integer divisible by every n in [1..max_factor]
    """
    candidate = max_factor
    while True:
        # Only multiples of max_factor can qualify, so step by max_factor
        # instead of 1 — same brute-force search, far fewer candidates.
        # Divisibility by 1 is trivially true, so start checking at 2.
        if all(candidate % factor == 0 for factor in range(2, max_factor + 1)):
            return candidate
        candidate += max_factor
f30159375bf852e77da2fcee21f8c5e407042b95
690,310
from typing import Any
from typing import List


def lst(*a: Any) -> List[Any]:
    """Returns arguments *a as a flat list, any list arguments are flattened.

    Example: lst(1, [2, 3]) returns [1, 2, 3].
    """
    flattened: List[Any] = []
    for item in a:
        # Splice list arguments in place; wrap everything else.
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
cb1c03058fab81071a22e7ca6febe898676c9a40
690,311
def parse_stam(organization_data):
    """ Used to parse stamnummer of organization. """
    stam_nr = organization_data["stamNr"]
    return str(stam_nr)
2937dc908a3a81d9bca35fe47a5327d4befbf22d
690,312
def construct_type(code):
    """Construct type in response."""
    # Currently a pass-through; a fully-qualified problem URI such as
    # 'https://bcrs.gov.bc.ca/.well_known/schemas/problem#{code}' could be
    # constructed here instead.
    return code
f98dfa58c6d6573a2dd5d37851be638831a96156
690,317
def silverS(home_score, away_score):
    """Calculate S for each team (Source: https://www.ergosum.co/nate-silvers-nba-elo-algorithm/).

    Args:
        home_score - score of home team.
        away_score - score of away team.
    Returns:
        0: - S for the home team.
        1: - S for the away team.
    """
    # Winner gets 1, loser 0; a draw splits the point.
    if home_score > away_score:
        return 1, 0
    if home_score < away_score:
        return 0, 1
    return .5, .5
29b714fa9392deba82310fe3bcab7f23300c82bb
690,318
def _get_run_tag_info(mapping):
    """Returns a map of run names to a list of tag names.

    Args:
      mapping: a nested map `d` such that `d[run][tag]` is a time series
        produced by DataProvider's `list_*` methods.

    Returns:
      A map from run strings to a list of tag strings. E.g.
        {"loss001a": ["actor/loss", "critic/loss"], ...}
    """
    result = {}
    for run, tags in mapping.items():
        # sorted() over the inner mapping yields its keys in order.
        result[run] = sorted(tags)
    return result
7840615622176894d6623aa26f4dd5ae0d204620
690,319
def trrotate_to_standard(trin, newchan=("E", "N", "Z")):
    """Rotate traces to standard orientation"""
    # Delegate entirely to the trace object's own rotation routine.
    rotated = trin.rotate_to_standard(newchan)
    return rotated
85471e817e2000a3781b32237dcea488109378d7
690,320
def score(boards, number, index):
    """Return the final score of the board that contains index."""
    # Each board is a flat run of 25 cells; find the start of this one.
    start = index - index % 25
    board_sum = sum(int(boards[i]) for i in range(start, start + 25))
    return int(number) * board_sum
83df16bd015277c1906343b61be7702ff8d2a3bd
690,321
def combine_two_lists_no_duplicate(list_1, list_2):
    """
    Method to combine two lists, drop one copy of the elements present in both
    and return a list comprised of the elements present in either list - but
    with only one copy of each.

    Args:
        list_1: First list
        list_2: Second list

    Returns:
        The combined list, as described above
    """
    combined = list(list_1)
    # Append only the elements of list_2 that list_1 does not already hold.
    for element in list_2:
        if element not in list_1:
            combined.append(element)
    return combined
4caa2d3494eeef61d502bc898433d04362e81f39
690,327
def match1(p, text):
    """Return true if first character of text matches pattern character p."""
    # Empty text can never match; '.' is a wildcard for any first character.
    return bool(text) and (p == '.' or p == text[0])
be9c804b03ff13a1e40a7661be13b4d48a8aaad6
690,328
import random


def shuffle(lst):
    """ Shuffle a list """
    # random.shuffle reorders in place; return the same list for chaining.
    random.shuffle(lst)
    return lst
2a50b74306c8fd580a2b9c6ae392d9ce9b34b786
690,336
import math


def bit_length(i: int) -> int:
    """Returns the minimal amount of bits needed to represent unsigned integer `i`.

    :param i: non-negative integer
    :return: number of bits (0 for i == 0)
    """
    # int.bit_length is exact; the previous math.ceil(math.log(i + 1, 2))
    # approach suffered from floating-point rounding for large integers
    # (log works on doubles, which lose integer precision beyond 2**53).
    return i.bit_length()
672063db380fd0957e68b0974a13e64b610c999b
690,339
from typing import Union


def convert_volume(value: Union[float, str]) -> float:
    """Convert volume to float."""
    # "--" is the device's placeholder for the minimum volume level.
    return -80.0 if value == "--" else float(value)
f93ed46f865ce14e2886bcac8fd0aa56b04f0658
690,345
def _ag_checksum(data):
    """
    Compute a telegram checksum (XOR of all bytes).
    """
    # Renamed the accumulator so it no longer shadows the builtin sum().
    checksum = 0
    for byte in data:
        checksum ^= byte
    return checksum
32d1ef971fc1fb63fb0583c32c309de3de41f39d
690,351
import turtle


def create_turtle(x, y):
    """[summary] Create the turtle pen with specific attributes

    [description] Set speed and pen color. Direction is set default due east.
    Pen is returned in list with x and y coordinates.
    """
    pen = turtle.Pen()
    pen.speed(8)
    pen.pencolor("white")
    return [pen, x, y]
305a0e63801821543a9087e245e4cc4c7ceee03d
690,354
import random
import string


def rndstr(length):
    """Generate random string of given length which contains digits and
    lowercase ASCII characters"""
    alphabet = string.ascii_lowercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
698dde72332c8995ebae01ad861037b61bc3be8f
690,356
def bb_union(*bbs):
    """Returns a bounding box containing the given bboxes

    :param bbs: bounding boxes given as (min_corner, max_corner) pairs
    :return: (min_corner, max_corner) of the union
    """
    # min(x)/max(x) instead of min(*x)/max(*x): the starred form raised
    # TypeError when only a single bounding box was supplied, because
    # zip() then yields 1-tuples and min(5) is invalid.
    mins = [min(x) for x in zip(*[b[0] for b in bbs])]
    maxs = [max(x) for x in zip(*[b[1] for b in bbs])]
    return mins, maxs
74631549fdd2aebb1e96790d54b88796d1f0dbe7
690,357
def search_list_of_objs(objs, attr, value):
    """Searches a list of objects and returns those with an attribute that
    meets an equality criteria

    Args:
        objs (list): The input list
        attr (str): The attribute to match
        value (any): The value to be matched

    Returns:
        list[any]: The list of objects found
    """
    matches = []
    for obj in objs:
        if getattr(obj, attr) == value:
            matches.append(obj)
    return matches
061920f5acac2a4b1f3368aef8f8d42472454861
690,358
def tw_to_rgb(thxs):
    """
    Convert a 12-digit hex color to RGB.

    Parameters
    ----------
    thxs : str
        A 12-digit hex color string, e.g. '#aabbccddeeff'.

    Returns
    -------
    tuple[int]
        An RGB tuple.

    Raises
    ------
    ValueError
        If the input is not '#' followed by 12 hex digits.
    """
    if len(thxs) != 13:
        if len(thxs) == 12:
            # Likely missing the leading '#'.
            raise ValueError("thxs is not correctly formatted (#xxxxxxxxxxxx).")
        # Removed a stray debug print() of the offending value that leaked
        # to stdout in library code.
        raise ValueError("thxs must be 12 digits long with '#' at the beginning.")
    # Each channel uses the high byte of its 4-digit group.
    return (int(thxs[1:3], 16), int(thxs[5:7], 16), int(thxs[9:11], 16))
c5117924c009c65685604fb2e99ebacb3e051588
690,359
def make_pair(coll, lbracket='<', rbracket='>'):
    """
    A context aware function for making a string representation of elements of
    relationships. It takes into account the length of the element: a single
    element is rendered bare, while multiple elements are comma-separated and
    wrapped in the given brackets.

    :param coll: The collection that needs to be printed. Can be a generator,
        but cannot be infinite.
    :param str lbracket: The bracket that goes on the left.
    :param str rbracket: The bracket that goes on the right.
    :returns: A context aware string representation of the pair.
    :rtype: str
    """
    items = list(coll)
    if len(items) == 1:
        return str(items[0])
    return '{}{}{}'.format(lbracket, ', '.join(items), rbracket)
2d3726adb7765a2a0eb2fc6fe238427b683f68e3
690,361
def _get2DArea(box):
    """Get area of a 2D box"""
    # Width times height from the box's edge coordinates.
    width = box['right'] - box['left']
    height = box['bottom'] - box['top']
    return width * height
1e6675b93717263851ac8c01edfd3c4e9d5b1889
690,366
from typing import List


def to_base_digits(value: int, base: int) -> List[int]:
    """Returns value in base 'base' from base 10 as a list of digits

    :param value: non-negative integer to convert
    :param base: target base (>= 2)
    :return: most-significant-first list of digits, each in [0, base)
    """
    digits = []
    n = value
    # ``>=`` rather than ``>``: with ``>`` a value equal to the base produced
    # an out-of-range "digit" (e.g. to_base_digits(10, 10) gave [10], not [1, 0]).
    while n >= base:
        n, digit = divmod(n, base)
        digits.append(digit)
    digits.append(n)
    return digits[::-1]
86209a380f04a28b9252ee2935edaa7db236a019
690,368
def get_neighbors(nodeid, adjmatrix):
    """returns all the direct neighbors of nodeid in the graph

    Args:
        nodeid: index of the datapoint
        adjmatrix: adjmatrix with true=edge, false=noedge

    Returns:
        list of neighbors and the number of neighbors
    """
    # Scan the nodeid column: row i is a neighbor when the entry is truthy.
    neighbors = [i for i in range(adjmatrix.shape[0]) if adjmatrix[i][nodeid]]
    return neighbors, len(neighbors)
d1a7407d23c0c27ac17339a2fe1453e46994cf56
690,369
def get_ucr_class_name(id):
    """
    This returns the module and class name for a ucr from its id as used in
    report permissions. It takes an id and returns the string that needed for
    `user.can_view_report(string)`. The class name comes from
    corehq.reports._make_report_class, if something breaks, look there first.

    :param id: the id of the ucr report config
    :return: string class name
    """
    return 'corehq.reports.DynamicReport' + '{}'.format(id)
5cdda11d8e79c725a0b2c48b573dad4b3a0f1b39
690,372
def _run_test_func(mod, funcpath):
    """
    Run the given TestCase method or test function in the given module.

    Parameters
    ----------
    mod : module
        The module where the test resides.
    funcpath : str
        Either <testcase>.<method_name> or <func_name>.

    Returns
    -------
    object
        In the case of a module level function call, returns whatever the
        function returns.
    """
    parts = funcpath.split('.', 1)

    # Plain module-level function: call it and return its result directly.
    if len(parts) == 1:
        return getattr(mod, parts[0])()

    # TestCase method: instantiate, then run setUp -> method -> tearDown.
    tcase_name, method_name = parts
    testcase = getattr(mod, tcase_name)(methodName=method_name)
    setup = getattr(testcase, 'setUp', None)
    if setup is not None:
        setup()
    getattr(testcase, method_name)()
    teardown = getattr(testcase, 'tearDown', None)
    if teardown:
        teardown()
9be3ad768d01a03da6038d059fa7bf9c5d845b07
690,375
def scope_minimal_nr_tokens(df_in, min_nr_tokens=1):
    """
    Remove destinations with fewer tokens than the set minimum
    (default: at least 1).
    """
    keep = df_in["nr_tokens"] >= min_nr_tokens
    return df_in.loc[keep]
e4104be6245dedd777378d43593999a0926ca512
690,382
def eval_en(x, mol):
    """
    Evaluate the energy of an atom.
    """
    # Update the geometry, then query the potential energy as a 1-element list.
    mol.set_positions(x)
    energy = mol.get_potential_energy()
    return [energy]
6cfef7c3eb81b6156f30f15c5180c8394c05ee2f
690,384
def default_before_hook(*args, **kwargs):
    """The default before hook: passes arguments through unchanged, so it
    acts like it's not even there."""
    return (args, kwargs)
4341322ee7fb1969c1fe88b06dfcb1a873b02ddf
690,388
def parse_dict(raw_dict, ignore_keys=None):
    """
    Parses the values in the dictionary as booleans, ints, and floats as
    appropriate

    Parameters
    ----------
    raw_dict : dict
        Flat dictionary whose values are mainly strings
    ignore_keys : list, optional
        Keys in the dictionary to remove

    Returns
    -------
    dict
        Flat dictionary with values of expected dtypes
    """

    def __parse_str(mystery):
        # Non-strings pass through untouched.
        if not isinstance(mystery, str):
            return mystery
        if mystery.lower() == 'true':
            return True
        if mystery.lower() == 'false':
            return False
        # try to convert to number; whole floats collapse to int
        try:
            mystery = float(mystery)
            if mystery % 1 == 0:
                mystery = int(mystery)
            return mystery
        except ValueError:
            return mystery

    # Default changed from a mutable [] to None (classic shared-default
    # pitfall); behavior is identical since falsy values normalize to [].
    if not ignore_keys:
        ignore_keys = list()
    elif isinstance(ignore_keys, str):
        ignore_keys = [ignore_keys]
    elif not isinstance(ignore_keys, (list, tuple)):
        raise TypeError('ignore_keys should be a list of strings')

    clean_dict = dict()
    for key, val in raw_dict.items():
        if key in ignore_keys:
            continue
        val = __parse_str(val)
        if isinstance(val, (tuple, list)):
            # NOTE: tuples are deliberately converted to lists here, as before.
            val = [__parse_str(item) for item in val]
        elif isinstance(val, dict):
            val = parse_dict(val, ignore_keys=ignore_keys)
        clean_dict[key] = val
    return clean_dict
fd9d2991ab2385850b5092b17e4e294e263a69db
690,392
def get_feature_names(npcs=3):
    """
    Create the list of feature names depending on the number of principal
    components.

    Parameters
    ----------
    npcs : int
        number of principal components to use

    Returns
    -------
    list
        name of the features.
    """
    roots = ["coeff" + str(i + 1) + "_" for i in range(npcs)]
    roots += ["residuo_", "maxflux_"]
    # One copy of every root per band: all g-band names first, then r-band.
    return [root + band for band in ["g", "r"] for root in roots]
4ff4ea02ec88b11010a5f675be31ff598a820b68
690,393
def _map_action_index_to_output_files(actions, artifacts):
    """Constructs a map from action index to output files.

    Args:
      actions: a list of actions from the action graph container
      artifacts: a map {artifact_id: artifact path}

    Returns:
      A map from action index (in action graph container) to a string of
      concatenated output artifacts paths.
    """
    index_to_outputs = {}
    for idx, action in enumerate(actions):
        # Sort paths for a deterministic, space-joined representation.
        paths = sorted(artifacts[oid] for oid in action.output_ids)
        index_to_outputs[idx] = " ".join(paths)
    return index_to_outputs
ee38063460654d0f30fdf400f99591198592115c
690,394
def adsorption(CGF, CGRA, CET, cg, epsl, KdF, KdR, KI):
    """
    Adsorption equilibrium of enzyme between facile and recalcitrant glucan,
    and accounting for inhibition by glucose (and other sugars if present)
    """
    # Glucose-inhibition factor shared by both binding expressions.
    inhibition = 1 + cg/KI
    CEGF = CET/(1 + KdF/KdR*CGRA/CGF + epsl*KdF/CGF*inhibition)
    CEGR = CET/(1 + KdR/KdF*CGF/CGRA + epsl*KdR/CGRA*inhibition)
    return CEGF, CEGR
ca0c4044299253138ca513f39debc2c6189a48a6
690,400
def container_logs(client, resource_group_name, name, container_name=None):
    """Tail a container instance log. """
    # Default to the container group name when no container name is given.
    if container_name is None:
        container_name = name
    log = client.container_logs.list(resource_group_name, container_name, name)
    return log.content
6ce1ff396e6993bbe1995ab7f3576e5c952ee675
690,401
def float_list_string(vals, nchar=7, ndec=3, nspaces=2, mesg='', left=False):
    """return a string to display the floats:
          vals    : the list of float values
          nchar   : [7] number of characters to display per float
          ndec    : [3] number of decimal places to print to
          nspaces : [2] number of spaces between each float
    """
    # Renamed so we no longer shadow the builtin format(); pick the
    # left- or right-justified template once, up front.
    fmt = '%-*.*f%*s' if left else '%*.*f%*s'
    pieces = [fmt % (nchar, ndec, val, nspaces, '') for val in vals]
    return mesg + ''.join(pieces)
87be23b879df35e672f32a89bfa0266d73812083
690,410
def tiles_from(state_pkt):
    """
    Given a Tile State packet, return the tile devices that are valid.

    This works by taking into account ``tile_devices_count`` and
    ``start_index`` on the packet.
    """
    valid_count = state_pkt.tile_devices_count - state_pkt.start_index
    return state_pkt.tile_devices[:valid_count]
b3d89aadf5530ca391876ffc2c6956769e455555
690,411
def balance_queue_modifier(count_per_day: float) -> float: """ Create a modifier to use when setting filter values. Because our queue is only ever 1k posts long (reddit limitation), then we never want any given sub to take up any more than 1/100th of the queue (seeing as how we have ~73 partners right now, seems like a reasonable amount). This is so that if a sub gets 3 posts per day, we can adequately bring in everything, but if it has 800 posts a day (r/pics) then the value is adjusted appropriately so that it doesn't overwhelm the queue. """ target_queue_percentage = 0.01 queue_percentage = count_per_day / 1000 return target_queue_percentage / queue_percentage
cc5637811b4fb461f6996924ac8143a8e8453e59
690,412
def seq_mult_scalar(a, s):
    """Takes a list of numbers a and a scalar s. For the input
    a=[a0, a1, a2,.., an] the function returns
    [s * a0, s * a1, s * a2, ..., s * an]"""
    scaled = []
    for value in a:
        scaled.append(s * value)
    return scaled
7d59162d19ec5bd445e7b960bf8f7369ce3b8693
690,413
def fix_z_dir(job):
    """
    Rather than fixing all directions only fix the z-direction during an NPT
    simulation

    Args:
        job (LAMMPS): Lammps job object

    Returns:
        LAMMPS: Return updated job object
    """
    # Rewrite the ensemble fix so only the z barostat coupling remains.
    ensemble = job.input.control["fix___ensemble"]
    job.input.control["fix___ensemble"] = ensemble.replace(
        "x 0.0 0.0 1.0 y 0.0 0.0 1.0 z 0.0 0.0 1.0", "z 0.0 0.0 1.0"
    )
    return job
534918de30bdc1bd852d2fffd9e6da7bec33ed7e
690,414
def get_uid(instance):
    """
    Get an instance's uid as hex, or None when the instance is missing.

    Examples::

        data = {
            'uid': get_uid(instance),
            'related_uid': get_uid(instance.related),
        }

    :rtype: str | None
    """
    if instance:
        return instance.uid.hex
    return None
5f0dade02edc61b7c78a450451e4a7f02d65267f
690,416
def idx_tuple_in_df(tuple_x, df):
    """Find the first row index of tuple_x in df."""
    for position, row in enumerate(df.values):
        if tuple_x == tuple(row):
            return position
    # Not found anywhere in the frame.
    return None
83335128ba7894e848ef0ce4e83e8b37fbdbb07f
690,417
def account_main_purse_uref(CLIENT, account_key: bytes) -> str:
    """Returns an on-chain account's main purse unforgeable reference.

    :param CLIENT: chain client exposing the queries API.
    :param account_key: key of the account to query.
    :return: main purse unforgeable reference.
    """
    # Thin delegation to the client's query API.
    return CLIENT.queries.get_account_main_purse_uref(account_key)
e7da0e9182b5a16c7308ddebdc93c453fd2d03b1
690,418
from typing import List def _make_pod_command() -> List[str]: """Generate pod command. Returns: List[str]: pod command. """ return ["./init_icscf.sh", "&"]
72a3fc87a37166d87fcb673a0b8fb97db0583d4d
690,420
def time_content_log_split(log_line):
    """
    Splits a portal.log line into the Time Elapsed and Content sections

    :param log_line: A line from the portal.log file
    :return: Values for time elapsed and content of line
    """
    # Strip the opening bracket, then split on the closing bracket + space.
    parts = log_line.replace('[', '').split('] ')
    return parts[0], parts[1]
6a42961a36588bd2a739b9879d705f21a4750df4
690,422
def get_number_rows(settings, star_height):
    """Determine the number of rows of stars that fit on the screen."""
    available_space_y = settings.screen_height - star_height
    # Each row needs two star-heights of vertical space.
    return int(available_space_y / (2 * star_height))
b89c11f82e9060a19e7291908eff6815a777b005
690,423
def unbindReferences(par, modeOnly=False):
    """
    Erase bind strings or change modes for all bindReferences of a parameter

    :param par: the bindMaster parameter
    :param modeOnly: if True, just change the references modes to prevMode
    :return: the references that were changed
    """
    references = par.bindReferences
    for ref in references:
        # Restore the pre-bind mode; optionally also clear the expression.
        ref.mode = ref.prevMode
        if not modeOnly:
            ref.bindExpr = ''
    return references
5c7ad211a808e3b2e69d5a74def521bf22b567ae
690,426
def redshiftFromScale(scale):
    """
    Converts a scale factor to redshift.

    :param scale: scale factor
    :type scale: float or ndarray
    :return: redshift
    :rtype: float or ndarray
    """
    # z = 1/a - 1
    inverse_scale = 1. / scale
    return inverse_scale - 1.
5f06a52f06ffbea0381389587c801de008e007d4
690,432
def normalize_url(url: str) -> str:
    """
    Remove leading and trailing slashes from a URL

    :param url: URL
    :return: URL with no leading and trailing slashes
    :private:
    """
    # Trim at most one slash from each end via a single slice.
    start = 1 if url.startswith('/') else 0
    end = -1 if url.endswith('/') else None
    return url[start:end]
53f6b26aeac2530010f1804325bd248024f3f07a
690,434
def reliability_calc(RACC, ACC):
    """
    Calculate Reliability.

    :param RACC: random accuracy
    :type RACC: float
    :param ACC: accuracy
    :type ACC: float
    :return: reliability as float
    """
    try:
        # NOTE: on any failure (e.g. division by zero when RACC == 1) the
        # string "None" — not the None object — is returned; callers rely
        # on this marker value.
        return (ACC - RACC) / (1 - RACC)
    except Exception:
        return "None"
f33d1d81dffb21c8379b6e135c967809061dcf10
690,439
from typing import Union
from typing import Any


def to_list(data: Union[tuple, list, Any]):
    """
    If input is tuple, it is converted to list. If it's list, it is returned
    untouched. Otherwise returns a single-element list of the data.

    :return: list-ified data
    """
    if isinstance(data, tuple):
        return list(data)
    if isinstance(data, list):
        # Lists are returned as the same object, not copied.
        return data
    return [data]
ee9b981f2e44c84b150c46ce8d6450f1c2597f1e
690,440
def dual_id_dict(dict_values, G, node_attribute):
    """
    Link analyses conducted on a dual graph back to the primal graph. For
    instance, it takes the dictionary containing the betweenness-centrality
    values of the nodes in the dual graph and associates these values with
    the corresponding edgeID.

    Parameters
    ----------
    dict_values: dictionary
        of the form {nodeID: value}, where value is a measure computed on
        the graph
    G: networkx graph
        the graph used to compute or assign values to nodes or edges
    node_attribute: string
        the attribute of the node to link to the edges GeoDataFrame

    Returns
    -------
    ed_dict: dictionary
        each item maps the node's attribute (key) to the measure (value)
    """
    ed_dict = {}
    for node, value in dict_values.items():
        # Key by the node's attribute (e.g. edgeID) instead of the node id.
        ed_dict[G.nodes[node][node_attribute]] = value
    return ed_dict
d401946bcde7cb0bd5c276e770337115beb93fbf
690,450
from typing import Callable
import asyncio


def add_async_job(target: Callable, *args):
    """Add a callable to the event loop.

    :param target: a coroutine object, a coroutine function, or a plain
        callable.
    :param args: positional arguments forwarded when ``target`` is called
        (ignored when ``target`` is already a coroutine object).
    :return: the scheduled Task, or the Future from ``run_in_executor``.
    """
    # NOTE(review): asyncio.get_event_loop() is deprecated when no loop is
    # running (DeprecationWarning since 3.10) — confirm callers always invoke
    # this from within a running loop before modernizing.
    loop = asyncio.get_event_loop()
    if asyncio.iscoroutine(target):
        # Already-created coroutine object: schedule it directly; args unused.
        task = loop.create_task(target)
    elif asyncio.iscoroutinefunction(target):
        # Coroutine function: call it with args to create the coroutine first.
        task = loop.create_task(target(*args))
    else:
        # Plain blocking callable: run it in the default executor thread pool.
        task = loop.run_in_executor(None, target, *args)
    return task
590bce904241c598e742d6c7370ebf2563aba5f1
690,451
def GCF(a, b):
    """
    Finds Greatest Common Factor of two given numbers

    :param a: arbitrary first number
    :type a: int
    :param b: arbitrary second number
    :type b: int
    :return: greatest common factor
    :rtype: int
    :raises TypeError: if either argument is not an int
    """
    if type(a) is not int or type(b) is not int:
        # The message used to claim "float type" even though the check
        # requires ints — corrected to match the actual contract.
        raise TypeError('Input must be int type.')
    if b > a:
        return GCF(b, a)
    # Euclid's algorithm.
    if a % b == 0:
        return b
    return GCF(b, a % b)
125e890918525e82ddf985fa33956facef8e497d
690,452
def construct_pandoc_command(
    input_file=None,
    lua_filter=None,
):
    """
    Construct the Pandoc command.

    # Parameters

    input_file:pathlib.Path
        - The file that we want to apply the lua filter too.

    lua_filter:pathlib.Path
        - The path to the lua filter to use for the word counts.

    # Return

    A list of CLI elements that will be used by subprocess.
    """
    # Assemble incrementally: binary, filter option, then the input file.
    command = ["pandoc"]
    command.extend(["--lua-filter", lua_filter])
    command.append(input_file)
    return command
357f4bf76aed2328b86b21f0b706348e5306d6bc
690,453
import re

import requests


def get_resource_tables(resource_url):
    """
    Return every HTML ``<table>`` fragment found in the page at
    ``resource_url``.

    The page is fetched with ``requests`` and scanned with a
    case-insensitive, dot-matches-newline regex.
    """
    table_pattern = re.compile(r'(?ims)(\<table\>.*?\</table\>)')
    page_text = requests.get(resource_url).text
    return table_pattern.findall(page_text)
b31ea54a649646d4f00627b09f5cdf3189c0efd4
690,456
import re


def split_blurb(lines):
    """Split blurb text into sections separated by horizontal rules.

    A horizontal rule is a line of three or more asterisks; rule lines are
    dropped and each remaining line keeps its trailing newline.
    """
    sections = [""]
    for line in lines.split('\n')[:-1]:
        if re.match(r'\*{3,}', line):
            # A rule starts a fresh section.
            sections.append("")
        else:
            sections[-1] = sections[-1] + line + '\n'
    return sections
1c808dc88b889b3d8b3462e0abcea8589f04d66f
690,461
def splitConsecutive(collection, length):
    """
    Split the elements of the list @collection into consecutive disjoint
    lists of length @length.

    If @length is greater than the no. of elements in the collection, the
    collection is returned as is (not wrapped in a list), matching the
    documented contract.
    """
    if len(collection) < length:
        # Too few elements to form even one full group.
        return collection
    return [collection[start:start + length]
            for start in range(0, len(collection), length)]
77beeef283424e34872122c44b3c48a5ca2bcbc7
690,462
import csv


def csv_to_fasta(csv_path, delimiter=","):
    """Convert a csv-file of the format: <sequence> <name> to FASTA text."""
    records = []
    with csv_path.open() as csv_file:
        for row in csv.reader(csv_file, delimiter=delimiter):
            records.append('> {}\n{}\n'.format(row[1], row[0]))
    return "".join(records)
5b88c14b098ed2270636758677d6695f7ce3f2fe
690,463
def _create_titled_group(root, key, title): """Helper to create a titled group in h5py""" out = root.create_group(key) out.attrs['TITLE'] = title return out
678cd39fc37b85cede98f6940a07b6b1bef479a3
690,467
import re


def run_and_parse_first_match(run_lambda, command, regex):
    """Run *command* via *run_lambda* and return the first regex capture.

    Returns None when the command exits non-zero or the pattern does not
    match the command's stdout.
    """
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    return match.group(1) if match else None
469b90fd65c70d09bc61b9d992673eb96a859c7d
690,469
def fetch_courses(soups):
    """Fetch the course item-frames from each parsed page in *soups*.

    Returns one list of matching ``div`` elements per input soup.
    """
    return [soup.find_all('div', class_='item-frame') for soup in soups]
1b728f7b8a42343ced3e84e3fce069a9dc7a1739
690,472
def iter_reduce_ufunc(ufunc, arr_iter, out=None):
    """
    Reduce ``ufunc`` left-to-right across arrays from ``arr_iter`` using
    constant memory.

    The binary ufunc is applied pairwise over the arrays produced by the
    iterator, accumulating into a single output buffer, so generator-backed
    inputs that reuse one memory block are consumed safely.

    Args:
        ufunc: binary numpy ufunc (e.g. ``np.maximum``) applied pairwise.
        arr_iter: iterator of equally-shaped arrays.
        out: optional preallocated output array; when given it is filled
            in-place and returned, otherwise the first array is copied.

    Returns:
        The accumulated array, or None when the iterator is empty.
    """
    try:
        first = next(arr_iter)
    except StopIteration:
        # Nothing to reduce.
        return None
    if out is None:
        # Copy so we never alias (and later clobber) the iterator's memory.
        out = first.copy()
    else:
        out[:] = first
    for arr in arr_iter:
        ufunc(out, arr, out=out)
    return out
f73c4e556763852450443825bc12224f791f7583
690,479
def _full_analysis_mp_alias(br_obj, analysis_set, output_directory, unique_name, verbose, quick_plots):
    """
    Alias for instance method that allows the method to be called in a
    multiprocessing pool. Needed as multiprocessing does not otherwise work
    on object instance methods.

    :param br_obj: object whose ``full_analysis`` method is invoked
    :param analysis_set: analysis set forwarded to ``full_analysis``
    :param output_directory: output directory forwarded to ``full_analysis``
    :param unique_name: identifier returned alongside the result so the
        caller can match pool results back to their source object
    :param verbose: verbosity flag; NOTE(review): it is also reused as
        ``compile_pdf`` below — confirm that coupling is intentional
    :param quick_plots: forwarded to ``full_analysis``
    :return: tuple of (br_obj, unique_name, result of ``full_analysis``)
    """
    return (br_obj, unique_name, br_obj.full_analysis(analysis_set, output_directory, verbose = verbose, compile_pdf = verbose, quick_plots = quick_plots))
6997d1c641154e0761b3f7304f09afecaa0a09ab
690,480
def GatherResultsFromMultipleFiles(results_by_file):
  """Gather multiple results to organize them by check name and file name.

  Args:
    results_by_file: A dict of check results indexed by file name.

  Returns:
    A dict of check results in the form of:
    {`check_name`: {`file_name`: {
        'warning': {
            'range': [lower_bound, upper_bound]
            'count': number of occurrences that fall into the range,
            'total': total number of data points,
        },
        'error': ...
    }}}
  """
  merged = {}
  # dict.iteritems() is Python-2-only and raises AttributeError on
  # Python 3; items() works on both.
  for filename, results in results_by_file.items():
    if results:
      for check_name, values in results.items():
        # setdefault replaces the explicit membership test/branch.
        merged.setdefault(check_name, {})[filename] = values
  return merged
eba58732330198b387c31b2d29d2e43089cb933d
690,482
import hashlib def _username_hash(username): """Returns bytes, a cryptographically safe one-way hash of the username. This way, if someone breaks the Fernet encryption, they still don't know the username. Args: username: unicode """ return hashlib.sha256(username.encode('utf-8')).digest()
45869c410ad53bfbffb38282f6cf43f56c892d77
690,485
def show_input(data):
    # TODO: move this into each individual settings module
    """
    Echo *data* and ask the user to confirm it was entered correctly.

    :param data: any printable data
    :return: True if the user answers 'y' (case-insensitive), else False
        (type: boolean)
    """
    print(data)
    # Prompt (Korean): "Did you enter the input correctly? Y/N:"
    confirm = input("입력을 재대로 하셨나요? Y/N: ")
    print("===========================================")

    if confirm.lower() == 'y':
        return True
    else:
        return False
8d45258dfcd1f33eeb36b4f51434779ae8f72b7e
690,487
import hashlib


def get_sha_hash(input_string):
    """
    Method returns the hash digest for a given string.

    NOTE(review): despite the name, this computes an MD5 digest; it is kept
    as MD5 so previously stored digests still match, but MD5 is unsuitable
    for security-sensitive hashing.

    Args:
        input_string (str or bytes): the input for which the hash is
            computed; str input is encoded as UTF-8 before hashing
            (previously str input raised TypeError)
    """
    if isinstance(input_string, str):
        # hashlib requires bytes; encode so plain-str callers work too.
        input_string = input_string.encode('utf-8')
    return hashlib.md5(input_string).digest()
58bebd717d53d3dec090031dec932cf7aa1d93b7
690,488
def generate_graphic_character_vocabulary(conn, min_coverage):
    """Generate a vocabulary of characters from graphic representations of
    lemmas with the specified minimal corpus coverage.

    This is the smallest vocabulary of the most frequent characters such
    that together they cover at least ``min_coverage`` of the corpus.

    :param conn: Database connection for statistics.
    :param float min_coverage: The minimal coverage, in [0, 1].
    :return: A dictionary from characters from graphic representations of
        lemmas to their frequency rank.
    """
    if not 0 <= min_coverage <= 1:
        raise ValueError('The minimum coverage must be between 0 (inclusive) and 1 (inclusive)')
    if min_coverage == 0:
        # Zero coverage needs no characters at all.
        return {}
    query = '''SELECT graphic, rank
                FROM statistics
                WHERE language = "jpn"
                AND form = "lemma:graphic:character"
                AND count >= (
                    SELECT MAX(count)
                    FROM statistics
                    WHERE language = "jpn"
                    AND form = "lemma:graphic:character"
                    AND cumulative_count >= (
                        SELECT MAX(cumulative_count)
                        FROM statistics
                        WHERE language = "jpn"
                        AND form = "lemma:graphic:character") * ?)'''
    vocabulary = {}
    for graphic_c, rank in conn.cursor().execute(query, (min_coverage,)):
        vocabulary[graphic_c] = rank
    return vocabulary
e9b67082fb3ff6144fcaeec352140e5e3c5fef66
690,491
def blendTriangular(d, u=0.1, s=0.4, c=0.9):
    """
    Triangular blending function, taken from eq. 3.5.

    The value rises linearly from 0 at ``u`` (start) to 1 at ``s`` (peak),
    then falls linearly back to 0 at ``c`` (end); outside ``(u, c)`` it is 0.
    Requires u < s < c; a degenerate ramp (s == u or c == s) yields 0.
    """
    d, u, s, c = float(d), float(u), float(s), float(c)
    # Degenerate triangle: avoid dividing by a zero-length ramp.
    if s == u or c == s:
        return 0
    if u < d <= s:
        return (d - u) / (s - u)
    if s < d < c:
        return (c - d) / (c - s)
    # d <= u or d >= c: outside the triangle's support.
    return 0.0
0e7e093c1ba2eaab46810cf09e53a2255ccfd4ba
690,492
def ordenar_alinhamento(elemento_frasico, alinhamento):
    """
    Order the aligned pairs according to the original sentence.

    :param elemento_frasico: list of tuples with the information
        (word/gesture, lemma, part-of-speech class) of each phrase element
    :param alinhamento: dict of aligned words/gestures keyed by lemma
    :return: list with the aligned words/gestures ordered as they appear
        in the original sentence.
    """
    return [valor
            for elemento in elemento_frasico
            for lema, valor in alinhamento.items()
            if lema == elemento[1]]
6994fdc7576d2e1820f6edea25046c03f1589eaf
690,493
import getpass


def ask_password(prompt="Password : ", forbiddens=[]):
    """Prompt the user for a password without echoing.

    Keeps asking until the entered password is not one of the forbidden
    values.

    Keyword Arguments:
        prompt {str} -- the question message (default: {"Password : "})
        forbiddens {list} -- the list of bad passwords (default: {[]})

    Returns:
        str -- the appropriate input password
    """
    while True:
        password = getpass.getpass(prompt)
        if password not in forbiddens:
            return password
4b51af58a1eada7883ea2dace3bd0f263ee9772e
690,501
def get_id(first_name, last_name):
    """
    Look up a person's id number in database.txt.

    :param first_name: The first_name to search for.
    :param last_name: The last_name to search for.
    :return: The id number for the given first/last name, otherwise None.
    """
    with open("database.txt", "r") as file:
        for raw_line in file:
            record = raw_line.rstrip()
            if not record:
                # Skip blank lines between records.
                continue
            first, last, _id = record.split(", ")
            if (first, last) == (first_name, last_name):
                return _id
    return None
0ec8f4b24b0453474c1449909f7fb079b5b784dc
690,502
def check_file(filename):
    """Returns whether or not a file is considered a valid image.

    A file is valid when the text after its last dot (or the whole name
    when there is no dot, preserving the original behavior) is jpg, jpeg
    or png, case-insensitively.
    """
    # Set membership replaces the chained `or` comparisons; rsplit only
    # needs the final segment.
    ext = filename.rsplit(".", 1)[-1].lower()
    return ext in {"jpg", "jpeg", "png"}
b44ef445babbabd9b3ec4dde2b25bacacd2c6b4a
690,503
def get_q_id(hit):
    """ Returns the query ID for a hit.

    Parameters
    ----------
    hit : dict
        A hit parsed from an HHsearch output file, with at least the key
        'alignment' (itself a dict) containing a 'Q xxx' key where xxx is
        some identifier.

    Returns
    -------
    str : The query ID starting with 'Q '.

    Notes
    -----
    Each 'hit' has one 'alignment' made of several named lines, among them
    'Q Consensus' and 'Q xxx' where xxx is the ID of the input query
    sequence. We assume those are the only two line names starting with 'Q'.
    """
    candidates = [key for key in hit['alignment'].keys()
                  if key.startswith('Q') and key != 'Q Consensus']
    return candidates[0]
35f301ce5a4cad34e39a79c28475b76c979da46c
690,506
def split_version(version_string):
    """Parse a version string like 2.7 into a tuple of ints."""
    return tuple(int(part) for part in version_string.split("."))
48cea68ebcd84b2d8cf4d1a2cf15387664067a72
690,510
def is_icmp_reply(pkt, ipformat):
    """Return True if pkt is echo reply, else return False.

    If an exception occurs while dissecting the packet, return False.

    :param pkt: Packet.
    :param ipformat: Dictionary of names to distinguish IPv4 and IPv6.
    :type pkt: dict
    :type ipformat: dict
    :rtype: bool
    """
    try:
        # Compare the reply layer's ICMP type against the expected value;
        # the comparison itself yields the bool we need.
        return pkt[ipformat['IPType']][ipformat['ICMP_rep']].type == \
            ipformat['Type']
    except Exception:
        # `except Exception` (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate; malformed packets simply yield False.
        return False
21196c53c0e227602f8aaba984d56aeae3af2781
690,512
def validate_move(move, turn, board):
    """
    Determine if the next move is valid for the current player.

    :param move: cell label, e.g. "a1"; all but the last character select
        the row of the board
    :param turn: 1 plays 'X'; any other value plays 'O'
    :param board: nested dict mapping rows to {cell: value}
    :return: boolean flag for if the move is valid as well as the current
        gamestate dictionary
    """
    piece = 'X' if turn == 1 else 'O'
    row = move[:-1]
    try:
        occupied = board[row][move] in ('X', 'O')
    except KeyError:
        # Unknown row or cell: invalid move, board unchanged.
        return False, board
    if occupied:
        return False, board
    board[row][move] = piece
    return True, board
b3117e72a8377aaceb5ee8c887a272bcb15ea553
690,518
def convert_name(name, to_version=False):
    """This function centralizes converting between the name of the OVA, and
    the version of software it contains.

    OneFS OVAs follow the naming convention of <VERSION>.ova

    :param name: The thing to covert
    :type name: String

    :param to_version: Set to True to covert the name of an OVA to the version
    :type to_version: Boolean
    """
    if to_version:
        # str.rstrip('.ova') strips a *character set*, not the suffix, so
        # e.g. "nova.ova" collapsed to "n"; remove the suffix explicitly.
        if name.endswith('.ova'):
            return name[:-len('.ova')]
        return name
    return '{}.ova'.format(name)
2800c22e2af5a6ad3d537a9713c473e6d44101c6
690,519
def meas_pruning_ratio(num_blobs_orig, num_blobs_after_pruning, num_blobs_next):
    """Measure blob pruning ratio.
    
    Args:
        num_blobs_orig: Number of original blobs, before pruning.
        num_blobs_after_pruning: Number of blobs after pruning.
        num_blobs_next: Number of a blobs in an adjacent segment, presumably 
            of similar size as that of the original blobs.
    
    Returns:
        Pruning ratios as a tuple of the original number of blobs, blobs 
        after pruning to original, and blobs after pruning to the next 
        region; None when either denominator count is not positive.
    """
    # Guard clause: both denominators must be positive to form the ratios.
    if num_blobs_next <= 0 or num_blobs_orig <= 0:
        return None
    print("num_blobs_orig: {}, blobs after pruning: {}, num_blobs_next: {}"
          .format(num_blobs_orig, num_blobs_after_pruning, num_blobs_next))
    return (num_blobs_orig,
            num_blobs_after_pruning / num_blobs_orig,
            num_blobs_after_pruning / num_blobs_next)
fabe113effdd97cffa31ccd9cda105b464a3163f
690,521
def count_str(S):
    """Takes a pd Series with at least the indices 'alt' and 'repeatunit',
    both strings. Return the number of occurances of repeatunit in alt;
    0 when 'alt' is None."""
    alt = S['alt']
    return 0 if alt is None else alt.count(S['repeatunit'])
0717727ff59a3b29e22502875c73323558554eec
690,525
def abs2(src, target):
    """
    Compute the squared absolute difference of two numbers.

    :param src: first value
    :param target: second value
    :return: square absolute value
    """
    distance = abs(src - target)
    return distance ** 2
275e015bca3cae2f737b284f40861b3136936109
690,527
def union(bbox1, bbox2):
    """Create the union of the two bboxes.

    Parameters
    ----------
    bbox1
        Coordinates [y0, y1, x0, x1] of first bounding box
    bbox2
        Coordinates [y0, y1, x0, x1] of second bounding box

    Returns
    -------
    [y0, y1, x0, x1]
        Coordinates of union of input bounding boxes
    """
    top = min(bbox1[0], bbox2[0])
    bottom = max(bbox1[1], bbox2[1])
    left = min(bbox1[2], bbox2[2])
    right = max(bbox1[3], bbox2[3])
    return [top, bottom, left, right]
0cb11ca0925bfbb191070b701e032abeca32eea5
690,529