content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def uninterleave(data):
    """Split an interleaved stereo array into separate left/right streams.

    Args:
        data: 1-D numpy array of interleaved samples (L, R, L, R, ...).

    Returns:
        A (2, len(data)//2) numpy array; row 0 is one channel, row 1 the
        other.
    """
    # order='F' (column-major) de-interleaves the samples; 'FORTRAN' is no
    # longer accepted by numpy. len(data)//2 uses integer division — the
    # original len(data)/2 is a float on Python 3 and rejected by reshape.
    return data.reshape(2, len(data) // 2, order='F')
713e753898236c172b552b1fa245bd0e1feff0fd
691,533
import math


def wien_deriv(x):
    """First derivative of the nonlinear equation solved in wien()."""
    five_exp_term = 5 * math.exp(-x)
    return 1 - five_exp_term
cd3836648921b932641c4d6b9d28729c8d6ef7a8
691,542
def find_production_volume_by_id(dataset, uuid):
    """Return the production volume of the exchange in ``dataset`` whose
    id equals ``uuid``.

    Raises:
        ValueError: if no exchange has id ``uuid``, or the matching
            exchange lacks a ``production volume`` entry.
    """
    for exchange in dataset['exchanges']:
        if exchange['id'] != uuid:
            continue
        if 'production volume' not in exchange:
            raise ValueError("Referenced exchange does not have a prod. volume")
        return exchange['production volume']
    raise ValueError("Exchange id {} not found in dataset".format(uuid))
96bf9a84d360df7e6173a02ef09b9fbcf223af5c
691,546
def factorial(x):
    """Return x! for a non-negative integer ``x``.

    Iterative implementation: the original recursed with no base case for
    x <= 0, so factorial(0) (and any negative input) overflowed the stack.
    Negative input now raises ValueError instead.
    """
    if x < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(2, x + 1):
        result *= i
    return result
58bbb5794e6110a28653560654e7dd689def3bd8
691,548
def construct_lvol_store(client, bdev_name, lvs_name, cluster_sz=None):
    """Construct a logical volume store via the RPC client.

    Args:
        bdev_name: bdev on which to construct logical volume store
        lvs_name: name of the logical volume store to create
        cluster_sz: cluster size of the store in bytes (optional)

    Returns:
        UUID of created logical volume store.
    """
    params = dict(bdev_name=bdev_name, lvs_name=lvs_name)
    if cluster_sz:
        params['cluster_sz'] = cluster_sz
    return client.call('construct_lvol_store', params)
33463a7efd59a92ba68eecfca98277f6e273520c
691,549
def dxdt(y, z):
    """Compute the derivative x' = -y - z."""
    dx = -y - z
    return dx
5307deada35d965dafa6e32576d67335c522d775
691,551
def split_1_grams_from_n_grams(topics_weightings):
    """Separate (word, weighting) pairs into 1-grams and n-grams per topic.

    :param topics_weightings: 3D list of shape [topics, number_of_top_words, 2]
        where each entry is (top_word, top_words_weighting).
    :return: Two lists parallel to the input topics: the first holds the
        1-gram (word, weighting) tuples, the second the n-gram tuples
        (words containing a space).
    """
    one_grams = []
    n_grams = []
    for topic_words_list in topics_weightings:
        topic_1, topic_n = [], []
        for word, weighting in topic_words_list:
            target = topic_n if ' ' in word else topic_1
            target.append((word, weighting))
        one_grams.append(topic_1)
        n_grams.append(topic_n)
    return one_grams, n_grams
cdb058d4ad718d578e2e11ec3d4ae9eb008224b4
691,555
def temporal_iou(span_A, span_B):
    """Intersection over union of two temporal "bounding boxes".

    span_A: (start, end)
    span_B: (start, end)

    Returns 0 when the spans do not overlap.
    """
    inter_start = max(span_A[0], span_B[0])
    inter_end = min(span_A[1], span_B[1])
    if inter_start >= inter_end:
        return 0
    union_start = min(span_A[0], span_B[0])
    union_end = max(span_A[1], span_B[1])
    return float(inter_end - inter_start) / float(union_end - union_start)
184b83bf110e1831f4829500d8405e1898c52968
691,556
def _max(arr): """Maximum of an array, return 1 on empty arrays.""" return arr.max() if arr is not None and len(arr) > 0 else 1
72e2a6b5c3a6d09cd38e858211280704be58c838
691,558
def extract_average_token_length(**args):
    """Example query feature: average length of the query's normalized tokens.

    Returns:
        (function) A feature extraction function that takes a query and
        returns the average normalized token length.
    """
    # pylint: disable=locally-disabled,unused-argument
    def _extractor(query, resources):
        tokens = query.normalized_tokens
        total_length = sum(len(token) for token in tokens)
        return {'average_token_length': total_length / len(tokens)}
    return _extractor
ac1d98ede91b1a8d8b99e63093bb3f5a79747d59
691,560
def parse_bm_alleles_col(_str):
    """Parse the 'alleles' column of a biomart response into a list
    of alleles; an empty string yields an empty list."""
    return _str.split('/') if _str != '' else []
31e562608781e14decca447073b45bae1f20c8f4
691,562
def geometry_column_name(df):
    """Get the geometry column name, falling back to 'geometry'.

    Args:
        df (pandas.DataFrame): frame that may expose a ``geometry`` accessor.

    Returns:
        str: ``df.geometry.name`` when available, else ``'geometry'``.
    """
    try:
        return df.geometry.name
    except AttributeError:
        # Plain DataFrames (no geometry accessor) use the default name.
        return 'geometry'
46141489406ad9a542848bbe337e22af5e4e92fb
691,563
def initials(name):
    """Convert a name to initials form (e.g. 'john smith' -> 'J.Smith').

    All words except the last become their capitalized first letter; the
    last word is title-cased; parts are joined with '.'.

    :param name: a string of words.
    :return: the string in initials form.
    """
    words = name.split()
    parts = [word[0].upper() for word in words[:-1]]
    parts.append(words[-1].title())
    return ".".join(parts)
893656321c24b5cc54054a753e388f9f0cda2e63
691,565
def should_update(iteration, epoch, settings):
    """
    Tells whether it is time to update the plateaus or not
    :param iteration: iteration number
    :param epoch: epoch number
    :param settings: settings dictionary
    :return: True if it is time for an update, and False otherwise
    """
    # NOTE(review): the guard checks for 'no_update' in the TOP-LEVEL
    # settings dict but reads the value from settings['update']['no_update']
    # — confirm this asymmetry is intentional and not a lookup bug.
    no_update = False if 'no_update' not in settings else settings['update']['no_update']
    if no_update:
        return False
    # Update on the start epoch itself, then every `frequency` epochs after
    # it, or on the very first iteration when 'update_first_iteration' is set.
    return epoch == settings['update']['start_epoch'] or \
        (epoch > settings['update']['start_epoch'] and epoch % settings['update']['frequency'] == 0) or \
        (settings['update_first_iteration'] and iteration == 0)
bc14ddf7cf2c3a23245038a7870794ec8473b69f
691,566
def is_ref_name(name):
    """Check if a given name is a reference directory.

    The basename's first extension component must be 'ref' (e.g.
    'sample.ref' or 'sample.ref.gz'). Names without any '.' extension now
    return False instead of raising IndexError as the original did.
    """
    parts = name.split('/')[-1].split('.')
    return len(parts) > 1 and parts[1] == 'ref'
3dfdbb6222a61f6bb8fe57be87f670e066101248
691,567
import re


def trimUnicode(s):
    """Replace every run of non-ASCII characters in ``s`` with one space."""
    non_ascii_run = re.compile(r'[^\x00-\x7F]+')
    return non_ascii_run.sub(' ', s)
e3dfdcce51e3a7e808383ddd8f69803d69b4010b
691,568
def centroid(img):
    """Find the centroid (center pixel) of the given image.

    input::
        img (np.ndarray): input img to find the centroid of

    return::
        centroid (tuple): centroid of the input image (height, width)
    """
    rows, cols = img.shape[0], img.shape[1]
    return rows // 2, cols // 2
ec50349c7514f4b104ee598e864f8ecdd78cba09
691,569
import math


def haversineDegreesToMeters(lat_1, lon_1, lat_2, lon_2):
    """Haversine distance in meters between two lat-lon points in degrees.

    :param lat_1: first latitude point
    :param lon_1: first longitude point
    :param lat_2: second latitude point
    :param lon_2: second longitude point
    :returns: distance in meters
    :reference: http://www.movable-type.co.uk/scripts/latlong.html
    :reference: http://stackoverflow.com/questions/4102520/how-to-transform-a-distance-from-degrees-to-metres
    """
    earth_radius_m = 6371000
    phi_1 = math.radians(lat_1)
    phi_2 = math.radians(lat_2)
    half_dphi = math.radians(lat_2 - lat_1) / 2
    half_dlambda = math.radians(lon_2 - lon_1) / 2
    a = (math.sin(half_dphi) ** 2
         + math.cos(phi_1) * math.cos(phi_2) * math.sin(half_dlambda) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_m * c
5c3a60e0b92d62dbe21169f40ffa99f067648fb6
691,571
def swap_1d(perm, i, j):
    """
    Swap two elements of a 1-D numpy array in-place.

    Examples
    --------
    >>> perm = np.array([2, 1, 2, 3, 4, 5, 6])
    >>> swap_1d(perm, 2, 6)
    array([2, 1, 6, 3, 4, 5, 2])
    """
    # Indexing a 1-D array yields a scalar copy, so a plain temp is safe.
    tmp = perm[i]
    perm[i] = perm[j]
    perm[j] = tmp
    return perm
d120e9d2aa9f5d7268658698be5b31ec20a31a9d
691,576
def missing_branch(children):
    """Return True if any child's predicate routes missing values to a
    special branch."""
    return any(child.predicate.missing for child in children)
75dd5f4cd503ae614023bc73c0119bebc9bb561e
691,577
def _replace_signature(raw_header: bytes, signature: bytes) -> bytes: """Replace the 'signature' field in a raw header.""" return signature + raw_header[8:]
4e1d37f445488f091a62133f733d67f7707a6229
691,579
def dummy_selector_a(node):
    """Dummy node selector: matches nodes named 'A', 'B' or 'G'."""
    selected_names = {'A', 'B', 'G'}
    return node['name'] in selected_names
02311dfa0c2f2a7ae37420b5b2fe61fb29d9aea6
691,583
import logging


def check_batch(batch, max_length=None):
    """Print data shapes for a batch and enforce an optional max length.

    Args:
        batch: dict with 'name', 'feat' (dict of shaped tensors) and
            'label' (dict of shaped tensors) entries.
        max_length: optional cap on the sequence length L, read from
            batch['feat']['aatype'].shape[2].

    Returns:
        False when ``max_length`` is set and the batch exceeds it;
        True otherwise.
    """
    def _print(k, d):
        # Only report tensors actually present in this batch.
        if k in d:
            print(f'\t{k}: {d[k].shape}')

    logging.info(f'Get protein_name: {batch["name"]}')
    for k in ['aatype', 'msa_feat', 'extra_msa', 'masked_msa_only']:
        _print(k, batch["feat"])
    for k in ['all_atom_positions']:
        _print(k, batch["label"])

    L = batch["feat"]['aatype'].shape[2]
    # `is not None` idiom (was `not max_length is None`); fixed the typo
    # "two long" -> "too long" in the skip message.
    if max_length is not None and L > max_length:
        print(f'\tskip {batch["name"]} due to too long length')
        return False
    return True
d06862bc2bccab8f88998718e7c3351899a6d49d
691,589
def totaler(products):
    """Sum the 'base', 'VAT' and 'total' fields over all products.

    Args:
        products: iterable of dicts each holding 'base', 'VAT', 'total'.

    Returns:
        dict: the three column totals.
    """
    totals = {'base': 0, 'VAT': 0, 'total': 0}
    for product in products:
        for key in totals:
            totals[key] += product[key]
    return totals
1c27cc2a69f44eb60d71cdd12ef887c8d1c7daf1
691,594
def snp_resultsHandover(variantId):
    """Create the resultsHanover dict by inserting the variantId into the template."""
    db_link = {
        "handoverType": {"id": "data:1106", "label": "dbSNP ID"},
        "note": "Link to dbSNP database",
        "url": f"https://www.ncbi.nlm.nih.gov/snp/?term={variantId}",
    }
    # The API endpoint takes the numeric part only (strips the 'rs' prefix).
    api_link = {
        "handoverType": {"id": "data:1106", "label": "dbSNP ID"},
        "note": "Link to dbSNP API",
        "url": f"https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/{variantId[2:]}",
    }
    return [db_link, api_link]
177d7437d759696538de70c26b326083d3cd22fc
691,596
def instance_supports_efa(instance_type: str) -> bool:
    """Checks if instance supports Amazon Elastic Fabric Adapter"""
    # https://docs.aws.amazon.com/en_us/AWSEC2/latest/UserGuide/efa-start.html
    efa_instance_types = {'c5n.18xlarge', 'i3en.24xlarge', 'p3dn.24xlarge'}
    return instance_type in efa_instance_types
7e05b87555e2de65b92df0dd6d84f31889493a08
691,598
def first_name(name):
    """Return the title-cased first word of ``name``; non-string input is
    passed through unchanged."""
    if isinstance(name, str):
        return name.split(' ')[0].title()
    return name
cd1ebfc43351ef05cadf23155bc25b37289c410a
691,600
import sqlite3


def get_user_review(user_id, prof_id, course_name):
    """Fetch the user's review for (prof_id, course_name) from the DB.

    Returns a dict of review fields if a matching row exists, otherwise an
    empty dict.
    """
    # Close the connection deterministically — the original leaked it.
    conn = sqlite3.connect('./db.sqlite3')
    try:
        cursor = conn.cursor()
        cursor.execute(
            "SELECT * FROM review NATURAL JOIN rating WHERE user_id = ? AND prof_id = ? AND course_name = ?;",
            (user_id, prof_id, course_name,))
        r = cursor.fetchone()
    finally:
        conn.close()
    user_review = {}
    if r:
        # Column positions follow the review NATURAL JOIN rating layout.
        user_review = {'review_id': r[0], 'user_id': r[1], 'text': r[2],
                       'date': r[3], 'semester': r[6], 'year': r[7],
                       'workload': r[8], 'learning': r[9], 'grading': r[10]}
    return user_review
59514104b1382157627325115bd5f041b961b3b9
691,601
def get_html_xml_path(path, build_name):
    """Substitute the __BUILD_NAME__ placeholder in an HTML report path.

    Args:
        path(str): path to html report
        build_name(str): software build number

    Returns:
        str: modified path, or "undetermined" when ``path`` has no
        ``replace`` method (e.g. it is None).
    """
    if hasattr(path, "replace"):
        return path.replace("__BUILD_NAME__", build_name)
    return "undetermined"
204722413aea85221756eb53e677d265c20200b5
691,606
def capacity_cost_rule(mod, g, p):
    """
    The capacity cost of projects of the *gen_spec* capacity type is a
    pre-specified number equal to the capacity times the per-mw fixed
    cost for each of the project's operational periods.
    """
    capacity_mw = mod.gen_spec_capacity_mw[g, p]
    fixed_cost_per_mw_yr = mod.gen_spec_fixed_cost_per_mw_yr[g, p]
    return capacity_mw * fixed_cost_per_mw_yr
521348dc171961e0687661a20a6898a1eca3daf6
691,609
def get_locations(data: dict) -> list:
    """
    Get users' locations from the dictionary.
    Return list of lists, every one of which contains user's nickname
    and location.
    >>> get_locations({'users': [{'screen_name': 'Alina', 'location':\
 'Lviv, Ukraine'}]})
    [['Alina', 'Lviv, Ukraine']]
    """
    return [[user['screen_name'], user['location']] for user in data['users']]
21c2cbc984e085d8b7b6da418e8184aa2d037cd2
691,612
def _label(label: str) -> str: """ Returns a query term matching a label. Args: label: The label the message must have applied. Returns: The query string. """ return f'label:{label}'
10472e6850923d2f35bdff1fb3603f82293a3d15
691,613
import torch


def collate_fn(data):
    """Sort a mini-batch by length (descending) and truncate every sample
    to the batch's maximum length.

    Args:
        data: iterable of (x, y, length) triples.

    Returns:
        (x, y, lengths): stacked, truncated tensors plus the sorted lengths.
    """
    batch = sorted(data, key=lambda sample: sample[2], reverse=True)
    max_len = batch[0][2]
    xs = torch.stack([x[:max_len] for x, _, _ in batch])
    ys = torch.stack([y[:max_len] for _, y, _ in batch])
    lengths = [length for _, _, length in batch]
    return xs, ys, lengths
4a04f4cf82b0db57ef9675d68887eced23662d0a
691,617
def seasonal_naive(data, n=7, **kwargs):
    """Forecast the next point as the value observed ``n`` points earlier.

    The seasonal parameter (``n``) has units of observations, not time;
    e.g. ``n=7`` captures weekly cycles within daily observations.

    Args:
        data (np.array): Observed data, presumed to be ordered in time.
        n (int): period of data seasonality

    Returns:
        float: a single-valued forecast for the next value in the series.
    """
    return data[-n]
a72ecc75a2c79f3e0add9659bf576a355deacfe3
691,618
import random


def pick(obj):
    """Return one element chosen uniformly at random from ``obj``."""
    chosen, = random.sample(obj, 1)
    return chosen
eded3083c775370dc5e2d23046934b98bfe1ef38
691,622
import re


def probability_cube_name_regex(cube_name):
    """
    Match ``cube_name`` against the IMPROVER probability cube naming
    pattern. Returns None if the name does not match (i.e. does not start
    with 'probability_of').

    Args:
        cube_name (str): Probability cube name
    """
    pattern = (
        '(probability_of_)'  # always starts this way
        '(?P<diag>.*?)'      # named group for the diagnostic name
        '(_in_vicinity|)'    # optional group, may be empty
        '(?P<thresh>_above_threshold|_below_threshold|_between_thresholds|$)'
    )
    return re.compile(pattern).match(cube_name)
6fafd524405def4490729846f2d4b705b14225a0
691,624
import re


def adjust_name_for_printing(name):
    """
    Make sure a name can be printed, alongside used as a variable name.
    Returns '' for None; raises NameError if the result is still not a
    valid identifier.
    """
    if name is None:
        return ''
    original = name
    # Order matters: '**' must be substituted before '*'.
    substitutions = (
        (" ", "_"), (".", "_"), ("-", "_m_"),
        ("+", "_p_"), ("!", "_I_"),
        ("**", "_xx_"), ("*", "_x_"),
        ("/", "_l_"), ("@", "_at_"),
        ("(", "_of_"), (")", ""),
    )
    for old, new in substitutions:
        name = name.replace(old, new)
    if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:
        raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(original, name))
    return name
931df7fd3f6f456ead9b62692dea6bae31cf736f
691,625
import pickle


def from_pickle(input_path):
    """Read network from pickle.

    Args:
        input_path: path to a pickle file containing the background matrix.

    Returns:
        The unpickled object (the network / background matrix).
    """
    # SECURITY NOTE(review): unpickling executes arbitrary code from the
    # file — only call this on trusted, locally-produced files.
    with open(input_path, 'rb') as f:
        unpickler = pickle.Unpickler(f)
        background_mat = unpickler.load()
    return background_mat
9fd24fc422b9b15d831d9411ef9ba5537cf2c90d
691,626
def pres(gamma, dens, eint):
    """
    Given the density and the specific internal energy, return the pressure

    Parameters
    ----------
    gamma : float
        The ratio of specific heats
    dens : float
        The density
    eint : float
        The specific internal energy

    Returns
    -------
    out : float
        The pressure
    """
    # Ideal-gas closure: p = rho * e * (gamma - 1)
    return dens * eint * (gamma - 1.0)
3bdcdfd1dd280d9cfd397ba0c21bffce0d68bcb6
691,629
def compressed(x, selectors):
    """
    compress('ABCDEF', [1,0,1,0,1,1]) --> A C E F
    """
    kept = []
    for item, keep in zip(x, selectors):
        if keep:
            kept.append(item)
    return kept
349ea3ffe35b135b5393650b5aff3d9a981c35d8
691,633
from typing import Any def flatten_dict(data: dict[str, Any], keys: list[str] = []) -> dict: """ Takes a dictionary containing key-value pairs where all values are of type other than `list` and flattens it such that all key-value pairs in nested dictionaries are now at depth 1. Args: data (dict): Dictionary containing non-list values keys (list[str], optional): Keys of `data` if `data` is a nested `dict` (`len(keys)` == depth of `data`). Defaults to []. Returns: dict: Flat dictionary containing all key-value pairs in `data` and its nested dictionaries """ flat_dict: dict[str, Any] = {} for key, value in data.items(): match value: case dict(): flat_dict = flat_dict | flatten_dict(value, [*keys, key]) case value: flat_dict['_'.join([*keys, key])] = value return flat_dict
1552225dd436e882685b90e237c761f2526b9244
691,634
import fnmatch def _table_matches_any_pattern(schema, table, patterns): """Test if the table `<schema>.<table>` matches any of the provided patterns. Will attempt to match both `schema.table` and just `table` against each pattern. Params: - schema. Name of the schema the table belongs to. - table. Name of the table. - patterns. The patterns to try. """ qual_name = '{}.{}'.format(schema, table) return any(fnmatch.fnmatch(qual_name, each) or fnmatch.fnmatch(table, each) for each in patterns)
d59e654b52f61e3b5335c2bee5dfdb960c0d8060
691,636
import typing import enum def _enum_labels( value: typing.Union[int, str, enum.Enum], enum_type: typing.Optional[typing.Type] = None, ) -> typing.Dict[int, str]: """ Gets the human friendly labels of a known enum and what value they map to. """ def get_labels(v): return getattr(v, 'native_labels', lambda: {})() return get_labels(enum_type) if enum_type else get_labels(value)
c48dece92922044050ad35f066bf303d2b7b9ac1
691,642
def logPlusOne(x):
    """
    Numerically stable log(x + 1) for small x.

    Args:
        x: Tensor

    Returns:
        Tensor log(x+1): the 2nd-order expansion x * (1 - x/2) is used
        where |x| <= 1e-4, and (x + 1).log() elsewhere.
    """
    eps = 1e-4
    small_mask = x.abs().le(eps).type_as(x)
    taylor = x.mul(x.mul(-0.5) + 1.0)
    exact = (x + 1.0).log()
    return taylor * small_mask + exact * (1.0 - small_mask)
5c5e621418490904323d7ca40213df66a23b6076
691,644
def calculate_directions(directions):
    """
    Input: An iterable of (direction, unit)
    Output: A summarized collection of directions (x, y)
    """
    x = y = 0
    for direction, unit in directions:
        assert direction in ['forward', 'down', 'up']
        if direction == 'forward':
            x += unit
        else:
            # 'down' increases depth, 'up' decreases it.
            y += unit if direction == 'down' else -unit
    return x, y
f3b076f8cbf9e0559c418909132b37ee79f908c2
691,645
def inc(x):
    """Return the successor of ``x`` (i.e. x + 1)."""
    successor = x + 1
    return successor
5720c02c0d8c2f10249a13951d420d8a958466a6
691,646
def calc_a(dert__):
    """
    Compute the vector representation of the gradient angle by normalizing
    (dy, dx) with g. Numpy-broadcasted; the first dimension of dert__ is a
    list of parameters with g at index 1 and (dy, dx) at indices 2 and 3.

    Example
    -------
    >>> dert1 = np.array([0, 5, 3, 4])
    >>> calc_a(dert1)
    array([0.6, 0.8])
    >>> np.degrees(np.arctan2(*calc_a(np.array([0, 450**0.5, 15, 15]))))
    45.0
    """
    g = dert__[1]
    dy_dx = dert__[[2, 3]]
    return dy_dx / g
d1be9a8eb2985776af45380248910d70ae09c840
691,647
def slide_window(img, x_start_stop=(None, None), y_start_stop=(None, None),
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """
    Generate a list of sliding windows (bounding boxes) over an image.

    Args:
        img: Input image (numpy array, height x width [x channels]).
        x_start_stop: (start, stop) on the x axis; None entries default to
            0 / image width.
        y_start_stop: (start, stop) on the y axis; None entries default to
            0 / image height.
        xy_window: Window size for x and y.
        xy_overlap: Fractional overlap between windows on each axis.

    Returns:
        A list of windows ((x_start, y_start), (x_end, y_end)).

    Note: the original assigned into its mutable default start/stop lists,
    so a second call with defaults silently reused the first image's
    bounds; the bounds are now resolved into locals and nothing passed in
    is ever mutated.
    """
    image_width, image_height = img.shape[1], img.shape[0]
    # Resolve None bounds without touching the caller's (or default) objects.
    x_start = x_start_stop[0] if x_start_stop[0] is not None else 0
    x_stop = x_start_stop[1] if x_start_stop[1] is not None else image_width
    y_start = y_start_stop[0] if y_start_stop[0] is not None else 0
    y_stop = y_start_stop[1] if y_start_stop[1] is not None else image_height
    # Pixels per step along each axis.
    x_step = int(xy_window[0] * xy_overlap[0])
    y_step = int(xy_window[1] * xy_overlap[1])
    # Number of complete windows that fit along each axis.
    windows_x = int(1 + (x_stop - x_start - xy_window[0]) / (xy_window[0] * xy_overlap[0]))
    windows_y = int(1 + (y_stop - y_start - xy_window[1]) / (xy_window[1] * xy_overlap[1]))
    window_list = []
    for x_window in range(windows_x):
        for y_window in range(windows_y):
            wx0 = x_start + x_window * x_step
            wy0 = y_start + y_window * y_step
            window_list.append(((wx0, wy0), (wx0 + xy_window[0], wy0 + xy_window[1])))
    return window_list
39856ae488cbfb74f1865992e7ab7f5ffd971c62
691,651
def util_key_new(schema, keys):
    """Returns list of keys not in schema"""
    return [key for key in keys if key not in schema]
3c856516bb6d20da865ac749e35de5f05c9dff66
691,653
import re


def remove_redundant_path_slashes(path):
    """
    If a relative filename contains multiple consecutive / characters
    (except at the beginning, in case of //server/host paths), remove them.
    >>> remove_redundant_path_slashes('/test//test2')
    '/test/test2'
    >>> remove_redundant_path_slashes('//test///test2')
    '//test/test2'
    >>> remove_redundant_path_slashes('')
    ''
    """
    # The first character is kept verbatim so a leading '//' (server/host
    # path) survives; only the remainder is collapsed. (The original also
    # had a dead `path_suffix = path[1:]` assignment that was immediately
    # overwritten — removed.)
    path_suffix = re.sub(r'//+', '/', path[1:])
    return path[0:1] + path_suffix
5cad7b7bf7d15b2d99894e3ef365c2b18d776c9b
691,654
import math


def pose_dist(pose1, pose2):
    """Return the Euclidean (x, y) distance between two ROS poses."""
    # math.hypot is the idiomatic, overflow-safe sqrt(dx**2 + dy**2).
    return math.hypot(pose1.position.x - pose2.position.x,
                      pose1.position.y - pose2.position.y)
59a884f078debee6a266612b3a5c480950df19eb
691,655
def _user_can_manage(user, partner):
    """
    Whether this user can manage the given partner org
    """
    # Org administrators can always manage.
    if user.can_administer(partner.org):
        return True
    # Otherwise the user must belong to this partner and be an org editor.
    if user.get_partner() != partner:
        return False
    return partner.org.editors.filter(pk=user.pk).exists()
33160eff14687921c127fb2ab199d8b1a406d90d
691,657
def average(sequence):
    """Calculate the mean across an array of e.g. read depths.

    Uses the object's own ``.mean()`` (numpy arrays) when available and
    falls back to the naive Python mean for plain lists.

    Args:
        sequence (list): ``numpy.array`` or list of values

    Returns:
        float: calculated average value
    """
    mean_method = getattr(sequence, 'mean', None)
    if mean_method is not None:
        # fastest path: numpy does the work in C
        return mean_method()
    return sum(sequence) / len(sequence)
eb66b74166040b9556f2af3f4ef8c60d0fa91a97
691,658
def cver_t(verstr):
    """Converts a version string into a tuple; a leading 'b' (beta)
    prefixes four zeros."""
    if verstr.startswith("b"):
        return (0, 0, 0, 0) + cver_t(verstr[1:])
    return tuple(int(part) for part in verstr.split("."))
73b4abd456551e678f44c0f940ad8d055993a345
691,660
def remove_markdown(body):
    """Remove the simple markdown used by Google Groups."""
    # Strip every '*' (bold/emphasis marker).
    return ''.join(ch for ch in body if ch != '*')
0510a76c1ce1ac68a684f954400ea3e162e2e529
691,661
def get_decades(year):
    """ Return 4 digit and 2 digit decades given 'year' """
    # NOTE(review): despite the docstring's order, the 2-digit decade is
    # returned FIRST and the 4-digit decade second, e.g. "1995" ->
    # ("90", "1990") — confirm callers expect this order.
    if year:
        try:
            # year[2:3] is the tens digit; year[:3] the century+decade prefix.
            decade = year[2:3] + "0"
            decade2 = year[:3] + "0"
        except:
            # Bare except: slicing a str never raises, so this effectively
            # catches TypeError from non-string input and yields blanks.
            decade = ""
            decade2 = ""
    else:
        decade = ""
        decade2 = ""
    return decade, decade2
fa5d84466e0b61e4dc88d157dbc22f1a61f231ad
691,673
def apply_weighting(object_to_be_weighted, weights):
    """
    Replicate the components of object_to_be_weighted using the
    corresponding weights to define the number of replicates.

    Args:
        object_to_be_weighted: could be a list or an array
        weights: a list of integers

    Returns:
        the transformed object (a list)
    """
    return [item
            for item, weight in zip(object_to_be_weighted, weights)
            for _ in range(weight)]
51161b4ed6e6540390487c40613838083f00fd3b
691,675
def parse_timeout(arg):
    """Parse a timeout argument; falsy input yields None."""
    return int(arg) if arg else None
e39af5bc323bb0ea32f1438a2459274627fd2f12
691,676
def doubleStuff(a_list):
    """ Return a new list which contains doubles of the elements in a_list. """
    return [2 * value for value in a_list]
f34d76eb05fc8a07fb0bee41c701e18542d6554a
691,677
import statistics


def median_of_counter_obj(counter):
    """
    Calculate the weighted median of a counter obj
    :param counter: A counter obj
    """
    expanded = []
    for value, count in counter.items():
        expanded.extend([value] * int(count))
    return statistics.median(expanded)
35327a66182e4482511b5dc511dd00bf45a9350a
691,678
def vowels_loop(value):
    """Count the number of vowels in a string, using a loop."""
    vowels = "aeiou"
    count = 0
    for char in value.lower():
        count += char in vowels  # bool adds 0 or 1
    return count
6f55e30283e9873933265568b5434cd014f3c825
691,681
def fixup(line):
    """Account for misformatted data from FFIEC with one-off fixups"""
    is_known_bad_row = (line[0] == "2016" and line[1] == "0000021122"
                        and len(line) == 23)
    if is_known_bad_row:
        # Drop the spurious 7th field from this specific 2016 record.
        return line[:6] + line[7:]
    return line
eccb0cb2afb34ec613efdf1272a6d884ddea2581
691,688
def make_region(chromosome, begin, end):
    """Create a region string "chr<N>:<begin>-<end>" from coordinates.
    Takes a 2 (1 for human 1-9) digit chromosome and 1-indexed begin/end
    positions."""
    return f"chr{chromosome}:{begin}-{end}"
5108f01bd1ab49770073036e2ad1106e7da354dd
691,689
def _xml_tag_filter(s: str, strip_namespaces: bool) -> str: """ Returns tag name and optionally strips namespaces. :param s: Tag name :param strip_namespaces: Strip namespace prefix :return: str """ if strip_namespaces: ns_end = s.find("}") if ns_end != -1: s = s[ns_end + 1 :] else: ns_end = s.find(":") if ns_end != -1: s = s[ns_end + 1 :] return s
647ee8e2b1aca898b625b00e0e366e1292ddbed6
691,691
def format_actions(action_list):
    """
    Convert a list of "[op][val]" strings (e.g. "/2.0", "-3.0", "+1.0")
    into a dict keyed by each action's index, with values ['op', val]
    such as ['+', 2.0].
    """
    formatted = {}
    for idx, action in enumerate(action_list):
        op, magnitude = action[0], float(action[1:])
        formatted[idx] = [op, magnitude]
    return formatted
e2c0c15f19184d021fd09b9c93ae89e1aef4efae
691,693
def interpolate_temperature(temperature):
    """Normalize a Celsius temperature into the 0.0 - 1.0 range.

    -10 degrees (very cold) maps to 0.0; 35 degrees (very hot) maps to
    1.0; values outside that span are clamped.

    Parameters: temperature - float in degrees celsius
    Returns: float normalized temperature
    """
    normalized = (10 + temperature) / 45
    if normalized < 0.0:
        return 0.0
    if normalized > 1.0:
        return 1.0
    return normalized
b4807e24b6119d70bfbdf31bd19c5777f512773d
691,694
def lead(x, n=1, default=None):
    """Return an array with each value replaced by the next (or further
    forward) value in the array.

    Arguments:
        x: a pandas Series object
        n: number of next values forward to replace each value with
        default: what to replace the n final values of the array with

    Example:
        >>> lead(pd.Series([1,2,3]), n=1, default=99)
        0     2
        1     3
        2    99
        dtype: int64
    """
    shifted = x.shift(periods=-n, fill_value=default)
    return shifted
c7c41355008c6691a01bcae31130ab0469543480
691,695
def get_and_check_size(iterator, n):
    """Check whether ``iterator`` yields exactly ``n`` elements.

    Consumes up to n+1 elements (the extra one detects "too long").

    Returns (is_length_n, consumed_elements).
    """
    consumed = []
    # Pull up to n+1 items; stopping with exactly n means "length is n".
    for _ in range(n + 1):
        try:
            consumed.append(next(iterator))
        except StopIteration:
            break
    return len(consumed) == n, consumed
e8a7f61f5346cdeccf0e3b67debabf6b9d20eae8
691,696
def square(x):
    """Return the square of x.

    >>> square(2)
    4
    >>> square(-2)
    4
    """
    squared = x * x
    return squared
a0cf408826163a0e3a123ff0b71330e09dd59286
691,697
def is_right_bracket(token):
    """Return True iff ``token`` is the right-bracket character ")"."""
    return ")" == token
0ce454bf48b1473e50f69ab3a6b44a8ceef5a081
691,700
def _make_CSV_line(username, language): """Return a WikiMetrics compatible CSV line.""" return "%s, %swiki" % (username, language)
6dc01ea0e225f19ca88e33ff77757e8cd047408e
691,701
import torch


def pixelshuffle(x: torch.Tensor, dimensions: int, scale_factor: int) -> torch.Tensor:
    """
    Apply pixel shuffle to the tensor `x` with spatial dimensions
    `dimensions` and scaling factor `scale_factor`.

    See: Shi et al., 2016, "Real-Time Single Image and Video
    Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural
    Network."

    See: Aitken et al., 2017, "Checkerboard artifact free sub-pixel
    convolution".

    Args:
        x: Input tensor
        dimensions: number of spatial dimensions, typically 2 or 3 for 2D or 3D
        scale_factor: factor to rescale the spatial dimensions by, must be >=1

    Returns:
        Reshuffled version of `x`.

    Raises:
        ValueError: When input channels of `x` are not divisible by
            (scale_factor ** dimensions)
    """
    dim, factor = dimensions, scale_factor
    input_size = list(x.size())
    batch_size, channels = input_size[:2]
    # Channels must split evenly into factor**dim groups — one per shuffled
    # sub-pixel position.
    scale_divisor = factor ** dim
    if channels % scale_divisor != 0:
        raise ValueError(
            f"Number of input channels ({channels}) must be evenly "
            f"divisible by scale_factor ** dimensions ({factor}**{dim}={scale_divisor})."
        )
    org_channels = channels // scale_divisor
    output_size = [batch_size, org_channels] + [d * factor for d in input_size[2:]]
    # Axes 2..2+2*dim of the reshaped tensor: the first `dim` are the factor
    # axes, the remaining `dim` are the original spatial axes.
    indices = tuple(range(2, 2 + 2 * dim))
    indices_factor, indices_dim = indices[:dim], indices[dim:]
    # Interleave (spatial, factor) axis pairs so the final reshape merges
    # each factor axis into its adjacent spatial axis.
    permute_indices = (0, 1) + sum(zip(indices_dim, indices_factor), ())
    x = x.reshape(batch_size, org_channels, *([factor] * dim + input_size[2:]))
    x = x.permute(permute_indices).reshape(output_size)
    return x
be1617e01eb43e958fcc5c38c4378ac9dede98ac
691,705
from pathlib import Path


def ensure_parent(*args) -> Path:
    """
    Ensure that the parent directory of a file exists, creating it and
    any missing ancestors if necessary. The args are joined into a single
    Path, which is returned.
    """
    target = Path(*args)
    target.parent.mkdir(parents=True, exist_ok=True)
    return target
83ba38618ed7e3ddc790904824842a167d8e28d3
691,706
import math


def get_utm_zone(lon: float, lat: float) -> str:
    """
    Return the EPSG code of the WGS84 UTM zone containing (lon, lat).

    Args:
        lon (float): Longitude (in WGS84)
        lat (float): Latitude (in WGS84)

    Returns:
        str: The EPSG code of the corresponding UTM zone, e.g.
        "EPSG:32631". Can be used directly to set crs in geopandas.
    """
    zone_number = (math.floor((lon + 180) / 6) % 60) + 1
    utm_band = str(zone_number).zfill(2)
    # 326xx codes are the northern hemisphere, 327xx the southern.
    prefix = "326" if lat >= 0 else "327"
    return f"EPSG:{prefix}{utm_band}"
45de051564738674d834273b9498e88f5500c7fe
691,711
import json


def combine_api_shard_files(input_files, output_file=None):
    """
    Merges the list of .json-formatted API shard files *input_files* into a
    single list of dictionaries, optionally writing the result to
    *output_file*.
    """
    print('Loading input files')
    input_lists = []
    for fn in input_files:
        # Context manager so each shard file is closed promptly — the
        # original's json.load(open(fn)) leaked the file handles.
        with open(fn) as f:
            input_lists.append(json.load(f))

    detections = []
    for detection_list in input_lists:
        assert isinstance(detection_list, list)
        for d in detection_list:
            # Minimal schema validation for each detection record.
            assert 'file' in d
            assert 'max_detection_conf' in d
            assert 'detections' in d
            detections.append(d)

    print('Writing output')
    if output_file is not None:
        with open(output_file, 'w') as f:
            json.dump(detections, f, indent=1)

    return detections
ffc8f694ca38b077835b8c80071ce402ceaf6002
691,712
def _GetHeuristicSuspectedCLs(analysis): """Gets revisions of suspected cls found by heuristic approach.""" if analysis and analysis.suspected_cls: return [(cl['repo_name'], cl['revision']) for cl in analysis.suspected_cls] return []
bf678079f10e8fa14874b451cfb5a8e7f76e1878
691,716
def sliding_window_regions(start, end, window_size, step_size):
    """
    Split the [start, end] span of a gene into overlapping fixed-size regions.

    A window of `window_size` bp is slid across the gene in increments of
    `step_size` bp; each placement yields one region. The first region begins
    at the gene start, and a final region is anchored so that it ends exactly
    at the gene end (it may overlap the previous region).

    Parameters:
    -----------
    1) start: (int) The genomic start position of a gene
    2) end: (int) The genomic end position of a gene
    3) window_size: (int) The size of each region in bp
    4) step_size: (int) The step in bp between consecutive window starts

    Returns:
    ++++++++
    (list) A 2d list of [region_start, region_end] pairs covering the gene.
    """
    start, end = int(start), int(end)
    window_size, step_size = int(window_size), int(step_size)

    # Region coordinates are inclusive, so a window of `window_size` bases
    # starting at s ends at s + window_size - 1.
    span = window_size - 1

    regions = []
    region_start = start
    while region_start + span < end:
        regions.append([region_start, region_start + span])
        region_start += step_size

    # Final region is anchored at the gene end, never starting before `start`.
    regions.append([max(start, end - span), end])
    return regions
775e7a8cfe79239465608133d4ad62d147502fab
691,717
from typing import Optional


def dict_diff(d1: dict, d2: dict, no_key: Optional[str] = '<KEYNOTFOUND>') -> dict:
    """Compare two dictionaries key by key.

    Args:
        d1 (dict): First dictionary to compare
        d2 (dict): Second dictionary to compare
        no_key (Optional[str]): Placeholder recorded when a key is absent
            from one of the dictionaries. Defaults to '<KEYNOTFOUND>'.

    Returns:
        dict: Maps each differing key to a (value-in-d1, value-in-d2) tuple;
            keys with equal values in both dicts are omitted.
    """
    result = {}
    for key, value in d1.items():
        if key in d2:
            if value != d2[key]:
                result[key] = (value, d2[key])
        else:
            result[key] = (value, no_key)
    for key, value in d2.items():
        if key not in d1:
            result[key] = (no_key, value)
    return result
2213b4f0a2b6da52220005aa1b9ed9f42cfa4075
691,718
def resolution_from_fsc(bins, fsc, value=0.5):
    """
    Compute the resolution at which the FSC curve first drops below `value`.

    Args:
        bins (array): The resolution bins (ordered from low resolution to high)
        fsc (array): The fsc in that resolution bin
        value (float): FSC threshold to test against (default 0.5)

    Returns:
        (bin index, bin value, fsc value) for the first bin whose FSC falls
        below `value`; the last bin if the curve never drops below it.
    """
    assert len(bins) == len(fsc)
    # Default to the last bin in case the FSC never crosses the threshold.
    bin_index = len(bins) - 1
    bin_value = bins[bin_index]
    fsc_value = fsc[bin_index]
    for i, (b, f) in enumerate(zip(bins, fsc)):
        # BUG FIX: the original compared against a hard-coded 0.5, silently
        # ignoring the `value` parameter.
        if f < value:
            bin_index = i
            bin_value = b
            fsc_value = f
            break
    return bin_index, bin_value, fsc_value
33ebfcb3d03703d6a5ceb5fe8d0220a28eafdb46
691,719
from pathlib import Path
import json


def Load_User_File(github_user):
    """
    Load the contents of a cached per-user JSON file, if present.

    Keyword arguments:
    github_user -- name of the file in the form <username>.json

    Returns the parsed JSON content when the cache file exists; returns
    None otherwise.
    """
    user_path = "scripts/files/users/%s" % github_user
    if not Path(user_path).exists():
        # Not cached: fall through and return None.
        return None
    print("Cached : ", user_path)
    with open(user_path, "r") as input_file:
        return json.load(input_file)
a67a9993b4e512c46a1183c6f2b9e5a81ea162ed
691,720
from typing import Tuple


def split_array(array_length: int, num_splits: int, split_id: int) -> Tuple[int, int]:
    """Compute the slice bounds of one part of an evenly split array.

    Args:
        array_length: total number of elements to split.
        num_splits: number of parts the array is divided into.
        split_id: index of the requested part, in [0, num_splits).

    Returns:
        (start, end) indices of the requested part; `end` is exclusive and
        clamped to `array_length`.

    Raises:
        ValueError: if `split_id` is outside [0, num_splits).
    """
    if not 0 <= split_id < num_splits:
        # BUG FIX: the original message referred to "gpu_id", a leftover name
        # that does not match this function's parameters.
        raise ValueError(f"split_id should be 0 <= {split_id} < {num_splits}")
    # Exact ceiling division replaces the original if/else on divisibility
    # (and avoids float rounding for very large lengths).
    step = -(-array_length // num_splits)
    return split_id * step, min((split_id + 1) * step, array_length)
988460b6bf8f16143da3f2f4f01acb336cd0490b
691,722
def process_tags(tags):
    """Convert tag pairs into a name-to-value dictionary.

    Input:
    -----
    tags: list of lists, [[tagname1, tagval1], [tagname2, tagval2], ...]

    Return:
    -------
    tagdict: dict, key=TAGNAME, value=TAGVALUE (coerced to float)
    """
    return {tagset[0]: float(tagset[1]) for tagset in tags}
95f78cdfb3632f705e6403fe04a78746c6cfa7d0
691,726
def onsegment(p, q, r):
    """
    Return True if point q lies within the bounding box of segment pr,
    given three collinear points p, q, r.
    """
    within_x = min(p[0], r[0]) <= q[0] <= max(p[0], r[0])
    within_y = min(p[1], r[1]) <= q[1] <= max(p[1], r[1])
    return within_x and within_y
1c48c9cfeddf00b155ddc63ae81c386ec1105d36
691,728
def _format_rotator_mode(value): """Format rotator mode, and rais appropriate error if it can't be formatted.""" modes = set(['pa', 'vertical', 'stationary']) if value.lower() not in modes: raise ValueError("Rotator mode must be in {!r}".format(modes)) return value.lower()
f56bcccaccfa4d6e68783c68b8cd32676b6b6584
691,729
def _find_gaps_split(datagap_times: list, existing_gap_times: list): """ helper for compare_and_find_gaps. A function to use in a loop to continue splitting gaps until they no longer include any existing gaps datagap_times = [[0,5], [30,40], [70, 82], [90,100]] existing_gap_times = [[10,15], [35,45], [75,80], [85,95]] split_dgtime = [[0, 5], [30, 40], [70, 75], [80, 82], [90, 100]] Parameters ---------- datagap_times list, list of two element lists (start time, end time) for the gaps found in the new data existing_gap_times list, list of two element lists (start time, end time) for the gaps found in the existing data Returns ------- list list of two element lists (start time, end time) for the new data gaps split around the existing data gaps """ split = False split_dgtime = [] for dgtime in datagap_times: for existtime in existing_gap_times: # datagap contains an existing gap, have to split the datagap if (dgtime[0] <= existtime[0] <= dgtime[1]) and (dgtime[0] <= existtime[1] <= dgtime[1]): split_dgtime.append([dgtime[0], existtime[0]]) split_dgtime.append([existtime[1], dgtime[1]]) split = True break if not split: split_dgtime.append(dgtime) else: split = False return split_dgtime
af1aaafa27725a033f9d34ff6f10c4288c9f96d9
691,734
def MRR(predictions, target):
    """
    Compute mean reciprocal rank.

    :param predictions: 2d array [batch_size x num_candidate_paragraphs],
        each row listing candidate indices in ranked order
    :param target: 2d array [batch_size x num_candidate_paragraphs] of 0/1
        relevance labels
    :return: mean reciprocal rank [a float value]
    """
    assert predictions.shape == target.shape
    assert predictions.ndim == target.ndim == 2

    num_rows, num_cols = target.shape
    reciprocal_sum = 0.0
    for row in range(num_rows):
        # Add 1/rank of the first relevant candidate in this row, if any.
        for rank in range(num_cols):
            if target[row, predictions[row, rank]] == 1:
                reciprocal_sum += 1.0 / (rank + 1)
                break
    return reciprocal_sum / num_rows
34b156fc3a38f23b6ad3ffae589c9afc773ec1ab
691,736
def isIn(obj, objs):
    """
    Safely check membership of `obj` in `objs`: identity first, then
    equality, swallowing any error raised by the comparison itself.
    """
    for candidate in objs:
        if candidate is obj:
            return True
        try:
            if candidate == obj:
                return True
        except Exception:
            # Broken/odd __eq__ implementations must not abort the scan.
            continue
    return False
0b19e6ac4d2ac2b290b0fe62bfd862c870708eac
691,737
def sem_of_rule(rule):
    """
    Given a grammatical rule, this function returns the semantic part of it.

    The rule's second element is expected to be a (syntax, semantics) pair;
    the semantics component is returned.
    """
    rhs = rule[1]
    return rhs[1]
9746ad4c83e681f55c1497ea514637c293074b27
691,740
def htk_to_ms(htk_time):
    """
    Convert time in HTK (100 ns) units to 5 ms units.

    Accepts either a numeric value or its string representation.
    """
    # isinstance replaces the original `type(htk_time) == type("string")`
    # anti-pattern (and also accepts str subclasses).
    if isinstance(htk_time, str):
        htk_time = float(htk_time)
    return htk_time / 50000.0
5e177e8e5644e4171296826bc62b71f9803889a3
691,741
def find_diff_of_numbers(stat1, stat2):
    """
    Finds the difference between two stats. If there is no difference,
    returns "unchanged". For ints/floats, returns stat1 - stat2.

    :param stat1: the first statistical input
    :type stat1: Union[int, float]
    :param stat2: the second statistical input
    :type stat2: Union[int, float]
    :return: the difference of the stats
    """
    if stat1 is None and stat2 is None:
        return "unchanged"
    if stat1 is None or stat2 is None:
        # Exactly one side is missing; report both values verbatim.
        return [stat1, stat2]
    if stat1 == stat2:
        return "unchanged"
    return stat1 - stat2
b316c702e2a5d63a6dad4beac1dd59939a544aca
691,742
def pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
    """
    Generate an unrolled shift-then-or loop over the first 'count' digits.
    Assumes that they fit into 'join_type'.

    (((d[2] << n) | d[1]) << n) | d[0]
    """
    pieces = []
    # Emit digits highest-index first; only non-zero indices get a shift.
    for index in range(count - 1, -1, -1):
        shift = " << PyLong_SHIFT" if index else ''
        pieces.append("%s[%d])%s)" % (digits_ptr, index, shift))
    return '(' * (count * 2) + "(%s)" % join_type + ' | '.join(pieces)
b3cda375fc2fbc922fcb7ecb7a4faa7bc581f7d8
691,745
import hashlib


def hexdigest(s):
    """
    Return the SHA-256 hex digest of a string after UTF-8 encoding.
    """
    digest = hashlib.sha256()
    digest.update(s.encode("utf-8"))
    return digest.hexdigest()
b4ded415c5e7bdf970d51c5973ea9b658ef70fe0
691,750
def factor_first_event(match_info, event_list, team_key):
    """
    Creates factor for an event in event_list

    Arguments:
    event_list: list of 'Event' objects
    team_key: string of the event type in the 'Team' object, e.g. 'firstTower'

    Returns:
    -1 if the event did not happen yet
    0 if red team did event
    1 if blue team did event
    """
    if not event_list:
        # No event of this kind has occurred yet.
        return -1
    return int(match_info['teams'][0][team_key])
0691915ddc4fd81775068fa6a1fcda341cbedc3d
691,753
import re


def str2num(s, tfunc=None):
    """Extract the numbers appearing in a string.

    Parameters
    ----------
    s : str
        The string.
    tfunc : None, optional
        Formatting function applied to each match. The special value 'auto'
        converts to float when the token looks fractional/exponential,
        otherwise to int. None returns the raw match strings.

    Returns
    -------
    list
        The number list.
    """
    matches = re.findall(r'-?\d+\.?\d*e*E?-?\d*', s)
    if tfunc is None:
        return matches
    if tfunc == 'auto':
        def _convert(token):
            return float(token) if ('.' in token or 'e' in token) else int(token)
        return [_convert(token) for token in matches]
    return [tfunc(token) for token in matches]
aa735e99251ee681fd4eb94d160a5eaac13648e1
691,756
def escape_like(string, escape_char="\\"):
    """
    Escape the string parameter used in SQL LIKE expressions.

    ::

        from sqlalchemy_utils import escape_like


        query = session.query(User).filter(
            User.name.ilike(escape_like('John'))
        )

    :param string: a string to escape
    :param escape_char: escape character
    """
    # Double the escape char first so pre-existing ones stay escaped,
    # then protect the LIKE wildcards.
    doubled = string.replace(escape_char, escape_char * 2)
    no_percent = doubled.replace("%", escape_char + "%")
    return no_percent.replace("_", escape_char + "_")
df8f805e50c5569910ad32b909db9a7db4b25b53
691,757
def create_bus(net, level, name, zone=None):
    """
    Create a bus on a given network

    :param net: the given network
    :param level: nominal pressure level of the bus
    :param name: name of the bus
    :param zone: zone of the bus (default: None)
    :return: name of the bus
    :raises ValueError: if `level` is not one of `net.LEVELS`
    """
    # Explicit validation replaces the original try/assert/except: `assert`
    # is stripped under `python -O`, which would have disabled this check.
    if level not in net.LEVELS:
        msg = "The pressure level of the bus {} is not in {}".format(name, net.LEVELS)
        raise ValueError(msg)

    idx = len(net.bus.index)
    net.bus.loc[idx] = [name, level, zone, "NODE"]
    return name
920aab5009c387b53c92dbd8af64a8122abe18b3
691,758
def u2q(u1, u2, warnings=True):
    """
    Convert the linear and quadratic terms of the quadratic limb-darkening
    parameterization -- called `u_1` and `u_2` in Kipping 2013 or `a` and `b`
    in Claret et al. 2013 -- and convert them to `q_1` and `q_2` as described
    in Kipping 2013:

    http://adsabs.harvard.edu/abs/2013MNRAS.435.2152K

    Parameters
    ----------
    u1 : float
        Linear component of quadratic limb-darkening
    u2 : float
        Quadratic component of quadratic limb-darkening
    warnings : bool
        Print a warning when the inputs violate Kipping's conditions

    Returns
    -------
    (q1, q2) : tuple of floats
        Kipping (2013) style quadratic limb-darkening parameters
    """
    q1 = (u1 + u2)**2
    q2 = 0.5*u1/(u1+u2)
    if warnings and (u1 < 0 or u2 < 0):
        # BUG FIX: the original format string used index {0} for BOTH
        # placeholders, so the printed u2 value was actually u1's.
        print("WARNING: The quadratic limb-darkening parameters " +
              "u1={0:.3f} or u2={1:.3f} violate Kipping's ".format(u1, u2) +
              "conditions for a monotonically increasing or everywhere-" +
              "positive intensity profile. Returning them as is.")
    return q1, q2
baa934c792be8e0b72a9ede9a1431f356f9496fa
691,759
def divide(x, y):
    """Return the quotient of two numbers (true division)."""
    quotient = x / y
    return quotient
a46d9906da6f9c028ea3f3cb1db67c64775d0d07
691,765
def get_indexed_attestation_participants(spec, indexed_att):
    """
    Wrapper around index-attestation to return the list of participant
    indices, regardless of spec phase.
    """
    participants = indexed_att.attesting_indices
    return list(participants)
5b37fe2628ec906879905da2ff9e433ac4bc16d3
691,768
def nths(x, n):
    """
    Given a list of sequences, returns a list of all the Nth elements of
    all the contained sequences
    """
    return list(map(lambda seq: seq[n], x))
d37cf578d9fa7d1bdbabe951574b30ea2bb608eb
691,769