content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def run_bq_query(client, query, timeout):
    """Execute *query* through a BigQuery-Python client and return its rows.

    Args:
        client: BigQuery-Python bigquery client
        query: String query
        timeout: Query timeout time in seconds

    Returns:
        List of dicts, one per record; dict keys are table field names and
        values are entries

    Raises:
        RuntimeError: if the job did not complete.
    """
    job_id, _ = client.query(query, timeout=timeout)
    complete, row_count = client.check_job(job_id)
    if not complete:
        raise RuntimeError('Query not complete')
    results = client.get_query_rows(job_id)
    print('Got %s records' % row_count)
    return results
1336b884b32d15e7bcb5b97ef8b2b6922d775e77
690,533
def create_headers(bearer_token):
    """Build the Authorization header for an API call.

    Args:
        bearer_token: Bearer token

    Returns:
        Header dict for the API call
    """
    return {"Authorization": f"Bearer {bearer_token}"}
5870332cf71800d0bcfbd739cc16db85508111bc
690,534
def get_ship_name(internal_name):
    """
    Get the display name of a ship from its internal API name

    :param internal_name: the internal name of the ship
    :return: the display name of the ship, or None if not found
    """
    display_names = {
        "adder": "Adder",
        "alliance-challenger": "Alliance Challenger",
        "alliance-chieftain": "Alliance Chieftain",
        "alliance-crusader": "Alliance Crusader",
        "anaconda": "Anaconda",
        "asp-explorer": "Asp Explorer",
        "asp-scout": "Asp Scout",
        "beluga-liner": "Beluga Liner",
        "cobra-mk-iii": "Cobra MkIII",
        "cobra-mk-iv": "Cobra MkIV",
        "diamondback-explorer": "Diamondback Explorer",
        "diamondback-scout": "Diamondback Scout",
        "dolphin": "Dolphin",
        "eagle": "Eagle",
        "federal-assault-ship": "Federal Assault Ship",
        "federal-corvette": "Federal Corvette",
        "federal-dropship": "Federal Dropship",
        "federal-gunship": "Federal Gunship",
        "fer-de-lance": "Fer-de-Lance",
        "hauler": "Hauler",
        "imperial-clipper": "Imperial Clipper",
        "imperial-courier": "Imperial Courier",
        "imperial-cutter": "Imperial Cutter",
        "imperial-eagle": "Imperial Eagle",
        "keelback": "Keelback",
        "krait-mk-ii": "Krait MkII",
        "krait-phantom": "Krait Phantom",
        "mamba": "Mamba",
        "orca": "Orca",
        "python": "Python",
        "sidewinder": "Sidewinder",
        "type-6": "Type-6 Transporter",
        "type-7": "Type-7 Transporter",
        "type-9": "Type-9 Heavy",
        "type-10": "Type-10 Defender",
        "viper-mk-iii": "Viper MkIII",
        "viper-mk-iv": "Viper MkIV",
        "vulture": "Vulture",
    }
    # .get defaults to None, matching the original not-found behaviour.
    return display_names.get(internal_name)
ee23f2b7e97df0b74b2006a8a0a4782201137de7
690,535
def user_input(passcode: str) -> str:
    """Prompt the user for the numerical value of *passcode* and return the reply."""
    prompt = f"Type the numerical value of the passcode `{passcode}`: "
    return input(prompt)
05449c0106382bd566f2fd1af26c5c0c198b5b13
690,536
import glob


def getRansomwareFiles(path):
    """Return all the ransomware files (sorted) from a given path.

    Files are matched by the ``*_labeled.*`` naming convention.

    :param path: directory to scan (str or path-like).
    :return: sorted list of matching file paths, or None when the directory
        cannot be read (preserves the original best-effort contract).
    """
    try:
        # (fix) narrowed the bare `except`, which also hid programming
        # errors, to the filesystem errors this call can actually raise.
        return sorted(glob.glob(str(path) + '/*_labeled.*'))
    except OSError:
        print("Ransomware samples could not be read")
        return None
ec97bbcbee0cf0900370f41dc4d21d3ecc6b2233
690,540
def to_dynamic_cwd_tuple(x):
    """Convert to a canonical cwd_width tuple.

    Accepts a string such as ``"50%"`` / ``"20"`` or a pre-built
    ``(value, unit)`` pair, and returns ``(float, unit)`` where unit is
    ``"%"`` or ``"c"``.
    """
    if not isinstance(x, str):
        # Already a (value, unit) pair.
        return (float(x[0]), x[1])
    if x[-1] == "%":
        return (float(x[:-1]), "%")
    return (float(x), "c")
ab0b74097a2513b7ee44aaa23cb9fa6cfb864ed0
690,543
import typing


def attach(object: typing.Any, name: str) -> typing.Callable:
    """Return a decorator doing ``setattr(object, name)`` with its argument.

    >>> spam = type('Spam', (object,), {})()
    >>> @attach(spam, 'eggs')
    ... def func():
    ...     pass
    >>> spam.eggs  # doctest: +ELLIPSIS
    <function func at 0x...>
    """
    def bind(func):
        # Attach the decorated callable under `name`, then hand it back
        # unchanged so the decoration is transparent.
        setattr(object, name, func)
        return func

    return bind
32f2d5beaf3114e8724f380fb691b128b920d3eb
690,547
import email


def process(data):
    """Extract the required header fields and body from a raw mail.

    :param data: sequence whose second element (``data[1]``) is the raw
        RFC 2822 message text.
    :return: dict with the Date/To/From headers and the message payload.
    """
    mail = email.message_from_string(data[1])
    extracted = {
        'date': mail['Date'],
        'to': mail['To'],
        'from': mail['From'],
        'message': mail.get_payload(),
    }
    return extracted
8cac6adbc212614d3c93cdb784a644e8f2a6d964
690,549
def test_in(value, seq): """Check if value is in seq. Copied from Jinja 2.10 https://github.com/pallets/jinja/pull/665 .. versionadded:: 2.10 """ return value in seq
7e42d027af4aecfc6cc6f9a93b6ee07eba3459a8
690,550
def make_copy_files_rule(repository_ctx, name, srcs, outs): """Returns a rule to copy a set of files.""" # Copy files. cmds = ['cp -f "{}" "$(location {})"'.format(src, out) for (src, out) in zip(srcs, outs)] outs = [' "{}",'.format(out) for out in outs] return """genrule( name = "{}", outs = [ {} ], cmd = \"""{} \""", )""".format(name, "\n".join(outs), " && \\\n".join(cmds))
fcc562b6ce7e8fe865d412b49fcd57f82f661945
690,553
def json_serializer(obj):
    """
    A JSON serializer that serializes dates and times.

    Intended as the ``default=`` hook for ``json.dumps``: anything exposing
    an ``isoformat()`` method (date, time, datetime) is rendered as its
    ISO-8601 string.

    Raises:
        TypeError: for unsupported objects. (fix) The original returned
            None, which made ``json.dumps`` silently emit ``null`` instead
            of failing, violating the documented ``default`` contract.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
d70e4488091b00c753d820556da485d31e49eb84
690,556
import re


def strip(text):
    """
    python's str.strip() method implemented using regex

    Args:
        text (str): text to strip of white space

    Returns:
        textStripped (str): text stripped of white space
    """
    # Single pass: drop whitespace anchored at the start (^) or at the
    # end ($) of the string -- equivalent to the two-pass original.
    return re.sub(r'^\s+|\s+$', '', text)
e68864333a39beab2c0af5e74ea1c983ac9035ca
690,557
from typing import Union
import re


def get_zone_id_from_item_name(item_name: str) -> Union[str, None]:
    """Extract the zone id (``<level>_<location>``) from the item name.

    Item names follow the ``level_location_rest`` convention; returns None
    when the name does not match.
    """
    match = re.search('([^_]+)_([^_]+)_(.+)', item_name)
    if match is None:
        return None
    return '{}_{}'.format(match.group(1), match.group(2))
3bd0869ddb4903343589e31436d8ad11020f5bf5
690,560
import collections


def replace_key_in_order(odict, key_prev, key_after):
    """Rename *key_prev* of OrderedDict *odict* to *key_after*, keeping its
    value and the order of all other entries intact.
    """
    renamed = collections.OrderedDict(
        (key_after if k == key_prev else k, v) for k, v in odict.items()
    )
    return renamed
118b6e443fb36aac3154af48dcb55e908e2f31b3
690,564
def _which(repository_ctx, cmd, default = None):
    """A wrapper around repository_ctx.which() to provide a fallback value.

    Args:
      repository_ctx: Bazel repository context (provides which()).
      cmd: name of the executable to locate on the PATH.
      default: value returned when the command is not found.

    Returns:
      The path to `cmd` as a string, or `default` when not found.
    """
    # NOTE(review): this appears to be Bazel Starlark (repository_ctx),
    # where `== None` is the required comparison -- do not "fix" to `is None`.
    result = repository_ctx.which(cmd)
    return default if result == None else str(result)
bd971599fbb77bf7eb504946ef2f901e877ed9b1
690,565
from typing import Callable


def composed(*decorators: Callable) -> Callable:
    """Build a single decorator by composing *decorators*.

    Applied right-to-left, i.e. the same order as stacking them with ``@``.
    """
    def apply_all(func: Callable) -> Callable:
        for dec in reversed(decorators):
            func = dec(func)
        return func

    return apply_all
ec6ef95e2cd3616d67ea76ca71519e4ee7703c01
690,570
def split_user_goals(all_user_goals):
    """
    Split the user goals into two lists: goals without request slots and
    goals with request slots, in that order.
    """
    without_req_slots = []
    with_req_slots = []
    for goal in all_user_goals:
        if goal["request_slots"]:
            with_req_slots.append(goal)
        else:
            without_req_slots.append(goal)
    return without_req_slots, with_req_slots
1d1d536ec78f89aaa49135512648aa30ea2142f3
690,571
def get_phi0(self, b_, bp_):
    """
    Get the reduced density matrix element corresponding to
    many-body states b and bp.

    Parameters
    ----------
    self : Builder or Approach
        The system given as Builder or Approach object.
    b_,bp_ : int
        Labels of the many-body states.

    Returns
    --------
    phi0bbp : complex
        A matrix element of the reduced density matrix (complex number).
    """
    # Map the external labels onto the internal state ordering.
    b = self.si.states_order[b_]
    bp = self.si.states_order[bp_]
    # Total occupation (charge) of each many-body state.
    bcharge = sum(self.si.get_state(b))
    bpcharge = sum(self.si.get_state(bp))
    phi0bbp = 0.0
    if self.funcp.kerntype == 'Pauli':
        # The Pauli kernel keeps only diagonal elements (populations).
        if b == bp:
            ind = self.si.get_ind_dm0(b, b, bcharge, maptype=1)
            phi0bbp = self.phi0[ind]
    elif bcharge == bpcharge:
        # Coherences exist only between states of equal charge.
        ind = self.si.get_ind_dm0(b, bp, bcharge, maptype=1)
        conj = self.si.get_ind_dm0(b, bp, bcharge, maptype=3)
        if ind != -1:
            if type(self.si).__name__ == 'StateIndexingDMc':
                # Complex storage: the element is stored directly.
                phi0bbp = self.phi0[ind]
            else:
                # Real storage: real and imaginary parts live in separate
                # halves of phi0; `conj` selects the sign of the imaginary
                # part. NOTE(review): presumably `ind < npauli` marks purely
                # real (diagonal) entries -- confirm against the
                # StateIndexing documentation.
                ndm0, npauli = self.si.ndm0, self.si.npauli
                phi0bbp = (self.phi0[ind] + 1j*self.phi0[ndm0-npauli+ind]
                           * (+1 if conj else -1)
                           * (0 if ind < npauli else 1))
    return phi0bbp
3e22545e7836cf8bf5c6b8ebb46518c1ec5cc114
690,572
import torch


def train_test_split(dataset, params):
    """Grabs random Omniglot samples and generates test samples from same class.

    The random seed is taken from params.sampler_seed, the test_shift is
    which sample to grab as a test. If it ends up being a different class,
    the sampler is walked back until the class is same, and the sample is
    different.

    Args:
        dataset: (Dataset) Sampler from Omniglot dataset.
        params: (json dict) Params.json file.

    Returns:
        train_dataloader, test_dataloader: (tuple) Containing matched train
            test pairs.
    """
    train_dataset = []
    test_dataset = []

    # Random seed from params file.
    torch.manual_seed(params.sampler_seed)

    # Create batch_size random indices from dataset.
    # Subtract params.test_shift so that we don't pick a random sample
    # so close to the end of the set that it looks for a test pair in
    # the blackness of 'index out of range'.
    idxs = torch.randint(len(dataset) - params.test_shift, (1, params.batch_size))

    # Make sure one of them is our control.
    idxs[0, 0] = 19

    for i, idx in enumerate(idxs[0]):
        shift_idx = params.test_shift
        train_sample, train_lbl = dataset[idx]
        test_sample, test_lbl = dataset[idx + shift_idx]

        # Make sure labels are the same, and it is not the same sample.
        # NOTE(review): the first loop iteration re-fetches the same
        # idx + shift_idx already fetched above (decrement happens after the
        # fetch), and nothing prevents shift_idx from going negative if no
        # match is found -- confirm termination on the intended dataset.
        while (train_lbl != test_lbl) or (torch.equal(train_sample, test_sample)):
            test_sample, test_lbl = dataset[idx + shift_idx]
            shift_idx -= 1

        train_dataset.append(train_sample)
        test_dataset.append(test_sample)

        #=====MONITORING=====#
        # Uncomment to see train_samples or change selection to test_sample.
        # utils.animate_weights(train_sample, auto=True)
        #=====END MONITORING=====#

    # Stack into (batch, 1, ...) tensors; unsqueeze_ adds the channel dim
    # in place.
    train_dataloader = torch.stack(train_dataset)
    train_dataloader.unsqueeze_(1)
    test_dataloader = torch.stack(test_dataset)
    test_dataloader.unsqueeze_(1)
    return train_dataloader, test_dataloader
a044bbc2467c1d4ee3ad3424ff59ea3ceb3d735d
690,575
import math


def dist(a, b):
    """Return the Euclidean distance between 2-D points *a* and *b*."""
    run = a[0] - b[0]
    rise = a[1] - b[1]
    return math.sqrt(run ** 2 + rise ** 2)
62393373ff9cbf2a42c412e88118b7bae5798bcb
690,584
def gt10(val):
    """
    Predicate testing if a value is greater than 10.

    (The original docstring said "less than 10", which contradicts the
    `val > 10` comparison below.)
    """
    return val > 10
70fcfcdb444873fc586f4bf38e5167a5f8099eda
690,589
def gender(mention):
    """Compute gender of a mention.

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('gender', GENDER), where GENDER is one of 'MALE',
        'FEMALE', 'NEUTRAL', 'PLURAL' and 'UNKNOWN'.
    """
    gender_value = mention.attributes["gender"]
    return "gender", gender_value
9b7fab2ca688662c5e3c7a5a24e05d6aa739ed15
690,591
def GetBytes(byte, size):
    """Get a string of bytes of a given size

    Args:
        byte: Numeric byte value to use
        size: Size of bytes/string to return

    Returns:
        A bytes type with 'byte' repeated 'size' times
    """
    return size * bytes((byte,))
887721f9777af3124d134be47b0a9959ed4b40af
690,592
import re


def string_to_tags_list(string):
    """
    Given a string representing tags in TiddlyWiki format
    parse them into a list of tag strings.

    Bare words are single tags; ``[[bracketed text]]`` groups a
    multi-word tag.
    """
    tag_matcher = re.compile(r'([^ \]\[]+)|(?:\[\[([^\]]+)\]\])')
    tags = []
    for match in tag_matcher.finditer(string):
        bracketed, bare = match.group(2), match.group(1)
        if bracketed:
            tags.append(bracketed)
        elif bare:
            tags.append(bare)
    return tags
20b3df498304902000e37f822023ae2276eb18be
690,593
def beginsField(line):
    """
    Does the given (stripped) line begin an epytext or ReST field?
    """
    if line.startswith("@"):
        return True
    sphinxwords = """
        param params return type rtype summary var ivar cvar raises raise
        except exception
    """.split()
    # startswith accepts a tuple, replacing the word-by-word loop.
    prefixes = tuple(":" + word for word in sphinxwords)
    return line.startswith(prefixes)
5500fe3a165ac743f9a371fb120c7119e23eb54c
690,595
from threading import local


def make_tls_property(default=None):
    """Creates a class-wide instance property with a thread-specific value.

    The returned object is a descriptor meant to be assigned as a class
    attribute. All instances of the owning class share the descriptor, but
    the stored value is kept in a threading.local, so each thread reads and
    writes its own copy. `default` is returned by threads that have not set
    a value yet.
    """
    class TLSProperty(object):
        def __init__(self):
            # One local() per descriptor; value storage is per-thread.
            self.local = local()

        def __get__(self, instance, cls):
            # Accessed on the class itself -> return the descriptor object.
            # NOTE(review): truthiness (`not instance`) also triggers for
            # falsy instances (e.g. empty containers) -- presumably never
            # the case for the intended owners; confirm.
            if not instance:
                return self
            return self.value

        def __set__(self, instance, value):
            # `instance` is deliberately ignored: the value lives on the
            # descriptor's thread-local, not on the instance.
            self.value = value

        def _get_value(self):
            return getattr(self.local, 'value', default)

        def _set_value(self, value):
            self.local.value = value

        value = property(_get_value, _set_value)

    return TLSProperty()
b923a72a63908affc637e31208714100b0093ff2
690,598
def _transform_session_persistence(persistence): """Transforms session persistence object :param persistence: the session persistence object :returns: dictionary of transformed session persistence values """ return { 'type': persistence.type, 'cookie_name': persistence.cookie_name }
b703cf02099c42df24cf110e3a96693adabca5d7
690,601
def byte_notation(size: int, acc=2, ntn=0):
    """Decimal Notation: take an integer, converts it to a string with the
    requested decimal accuracy, and appends either single (default), double,
    or full word character notation.

    - Args:
        - size (int): the size to convert
        - acc (int, optional): number of decimal places to keep. Defaults to 2.
        - ntn (int, optional): notation name length. Defaults to 0.
    - Returns:
        - [tuple]: 0 = original size int unmodified; 1 = string for printing
    """
    size_dict = {
        1: ['B', 'B', 'bytes'],
        1000: ['k', 'kB', 'kilobytes'],
        1000000: ['M', 'MB', 'megabytes'],
        1000000000: ['G', 'GB', 'gigabytes'],
        1000000000000: ['T', 'TB', 'terabytes']
    }
    for key, value in size_dict.items():
        if (size / key) < 1000:
            # (fix) stop at the first (smallest) fitting unit. Without this
            # break, every later unit overwrote the string, so all sizes
            # were rendered in terabytes.
            return size, f'{size / key:,.{acc}f} {value[ntn]}'
    # Larger than ~1000 TB: fall back to the largest unit instead of
    # returning an empty string.
    key, value = 1000000000000, size_dict[1000000000000]
    return size, f'{size / key:,.{acc}f} {value[ntn]}'
614bdfd883f0d875abe22e05186d2073380497b3
690,605
import copy def _DeepCopySomeKeys(in_dict, keys): """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. Arguments: in_dict: The dictionary to copy. keys: The keys to be copied. If a key is in this list and doesn't exist in |in_dict| this is not an error. Returns: The partially deep-copied dictionary. """ d = {} for key in keys: if key not in in_dict: continue d[key] = copy.deepcopy(in_dict[key]) return d
2d759603ad7cf1ada5741333be138f57957677f6
690,606
def prandtl(cp=None, mu=None, k=None, nu=None, alpha=None):
    """
    Calculate the dimensionless Prandtl number for a fluid or gas.

    .. math:: Pr = \\frac{c_p \\mu}{k} = \\frac{\\nu}{\\alpha}

    Parameters
    ----------
    cp : float
        Specific heat [J/(kg⋅K)]
    mu : float
        Dynamic viscosity [kg/(m⋅s)]
    k : float
        Thermal conductivity [W/(m⋅K)]
    nu : float, optional
        Kinematic viscosity [m²/s]
    alpha : float, optional
        Thermal diffusivity [m²/s]

    Returns
    -------
    pr : float
        Prandtl number [-]

    Examples
    --------
    >>> prandtl(cp=4188, mu=0.001307, k=0.5674)
    9.647

    >>> prandtl(nu=1.5064e-5, alpha=2.1002e-5)
    0.71726

    Raises
    ------
    ValueError
        Must provide (cp, mu, k) or (nu, alpha)

    References
    ----------
    Daizo Kunii and Octave Levenspiel. Fluidization Engineering.
    Butterworth-Heinemann, 2nd edition, 1991.
    """
    # Truthiness checks (not `is None`) mirror the original contract:
    # zero-valued inputs fall through to the next form or the error.
    if cp and mu and k:
        return (cp * mu) / k
    if nu and alpha:
        return nu / alpha
    raise ValueError('Must provide (cp, mu, k) or (nu, alpha)')
62a9ce6b458373d93c9cb4df4ccb705968e4a8b6
690,610
def sentence_selection(sentences):
    """
    Select (stripped) sentences that contain more than two
    whitespace-separated tokens.

    (fix) The original also tested `(sent or not sent.isspace())`, which is
    always True (an empty string is not .isspace()), so it was dead code;
    the token-count check alone already excludes empty/whitespace-only
    sentences.
    """
    return [sent.strip() for sent in sentences if len(sent.split()) > 2]
0e6ec6082c39a2e2728b5813ecd463b7f2731b65
690,611
def check_consistency(header1, header2):
    """
    Return true if all critical fields of *header1* equal those of
    *header2*.
    """
    critical_fields = (
        'Station_Name',
        'IAGA_CODE',
        'Geodetic_Latitude',
        'Geodetic_Longitude',
        'Elevation',
        'Reported',
        'Sensor_Orientation',
        'Digital_Sampling',
        'Data_Interval_Type',
        'Data_Type',
    )
    return all(
        getattr(header1, field) == getattr(header2, field)
        for field in critical_fields
    )
7166f8acc10a5401364fc987a6b1b5b1e381a793
690,612
def is_feature_component_start(line):
    """Checks if a line starts with '/', ignoring leading whitespace."""
    stripped = line.lstrip()
    return stripped[:1] == "/"
f9ce6e88987f86e0b2116252e0aaa9fd449de567
690,613
def header(img, author, report_date, report_time, report_tz, title) -> str:
    """Creates reports header

    Parameters
    ----------
    img : str
        Image for customizable report
    author : str
        Name of author responsible by report
    report_date : str
        Date when report is run
    report_time : str
        Time when report is run
    report_tz : str
        Timezone associated with datetime of report being run
    title : str
        Title of the report

    Returns
    -------
    str
        HTML code for the report header
    """
    # NOTE(review): layout reconstructed from a flattened source -- the
    # exact inter-tag whitespace of the HTML is cosmetic only.
    return f"""
    <div style="display:flex; margin-bottom:1cm;">
        {img}
        <div style="margin-left:2em">
            <p><b>Analyst:</b> {author}</p>
            <p><b>Date :</b> {report_date}</p>
            <p><b>Time :</b> {report_time} {report_tz}</p>
            <br/>
            <p>{title}</p>
        </div>
    </div>"""
484176e93ff7dcfc70e3c2adc54874a3a650d57c
690,615
import requests


def handle_response(r, http_method, custom_err):
    """
    Handles the HTTP response and returns the JSON

    Parameters
    ----------
    r: requests module's response
    http_method: string
        "GET", "POST", "PUT", etc.
    custom_err: string
        the custom error message if any

    Returns
    -------
    json : dict
        Parsed body on a 200 response; empty dict when the body is empty.
        Non-OK responses print diagnostics and re-raise via
        raise_for_status().
    """
    # NOTE(review): the local name `json` shadows the stdlib module name;
    # harmless here but worth renaming if this function grows.
    json = {}
    if r.status_code == requests.codes.ok:
        if r.text:
            json = r.json()
        else:
            print("{0} returned an empty response.".format(http_method))
    else:
        if custom_err is not None:
            print(custom_err)
        print("Status code: " + str(r.status_code))
        if r.text:
            print(r.text)
        # Raises requests.HTTPError for the non-OK status.
        r.raise_for_status()
    return json
e22799a1e841263f74dc22adda76d1897f7fe725
690,619
from typing import OrderedDict def _Net_blobs(self): """ An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name """ return OrderedDict([(bl.name, bl) for bl in self._blobs])
6c68e91a10fb9eda6c0be9ef03ffdd08823131d8
690,620
def is_cg_developed(properties):
    """Check if a colorgroup is fully developed (every property has a hotel)."""
    # check validity of colorgroup: all properties must share one color
    colors = {prop.color for prop in properties}
    assert len(colors) == 1
    return all(prop.has_hotel for prop in properties)
9ef92a7695b9deb73d2fdd78dc13df44b70599a1
690,622
def _matrix_to_vector(X): """ Returns a vector from flattening a matrix. """ u = X.reshape((1, -1)).ravel() return u
500290449bc0bac48b6ca6b7f59bf2f0d39bd216
690,623
def pad_guid_bytes(raw_bytes: bytes) -> bytes:
    """Pads a sequence of raw bytes to make them the required size of a UUID.

    Note that if you're using an int as your source for instantiating a UUID,
    you should not use this function. Just use UUID(your_int_here).
    """
    length = len(raw_bytes)
    if length == 0 or length > 16:
        raise ValueError("Byte length must be between 1 and 16.")
    # ljust pads on the right with NUL bytes up to 16.
    return raw_bytes.ljust(16, b'\x00')
b1cf8f50a041ac63be1c208388b904f774e437f3
690,626
def file_info_equal(file_info_1, file_info_2):
    """Return true if the two file-infos indicate the file hasn't changed.

    A file-info is a (mtime, size, crc) tuple; crc may be None.
    """
    # Negative matches are never equal to each other: a file not
    # existing is not equal to another file not existing.
    none_info = (None, None, None)
    if file_info_1 == none_info or file_info_2 == none_info:
        return False
    # Equal if the size and the mtimes match.
    if file_info_1[:2] == file_info_2[:2]:
        return True
    # Even if mtimes don't match, they're equal if the size and the
    # crcs match -- but only when the crcs are actually present.
    return file_info_1[2] is not None and file_info_1[1:] == file_info_2[1:]
0cc17448dba034d65521c86a0f6d7c70b98cf02c
690,627
import random


def generateIndices(n_blocks, N, D):
    """
    generates indices for block matrix computation. Checked.

    Input:
    n_blocks: number of blocks to use.
    N: number of samples.
    D: number of genes.

    Output:
    y_indices_to_use[i][j] is the indices of block j in sample i.

    Note: uses the global `random` state; seed externally for
    reproducibility.
    """
    y_indices_to_use = []
    idxs = list(range(D))
    # Floor division; the last block absorbs any remainder.
    n_in_block = int(1. * D / n_blocks)
    for i in range(N):
        partition = []
        # Fresh random partition of the D indices for each sample.
        random.shuffle(idxs)
        n_added = 0
        for block in range(n_blocks):
            start = n_in_block * block
            end = start + n_in_block
            if block < n_blocks - 1:
                idxs_in_block = idxs[start:end]
            else:
                # Last block takes everything left when D % n_blocks != 0.
                idxs_in_block = idxs[start:]
            partition.append(sorted(idxs_in_block))
            n_added += len(idxs_in_block)
        y_indices_to_use.append(partition)
        if i == 0:
            print('Block sizes', [len(a) for a in partition])
        # Sanity check: every index was assigned exactly once.
        assert(n_added == D)
    return y_indices_to_use
8850db07af5811846cd80f6225b2a56b71284dc2
690,631
def compute_delays(SOA):
    """
    calculate the delay time for color/word input
    positive SOA => color is presented earlier, v.v.

    Parameters
    ----------
    SOA : int
        stimulus onset asynchrony == color onset - word onset

    Returns
    -------
    int,int
        the delay time for color/word input, repsectively
    """
    if SOA >= 0:
        # word channel is the delayed one
        return 0, SOA
    # negative SOA: color channel is delayed
    return -SOA, 0
3c048633501e75f0a46dc99d225819c9d0750a74
690,637
import math


def dist(p, q):
    """
    Helper function to compute the "distance" of two 2D points.
    """
    delta_x = p[0] - q[0]
    delta_y = p[1] - q[1]
    return math.sqrt(delta_x ** 2 + delta_y ** 2)
d15da20e31627aef4fb589fcbc6bae25adf7c32d
690,644
from typing import Dict


def sra_id_to_app_input(sra_id: str) -> Dict:
    """Generate input from app for sra_fastq_importer

    ``split_files`` is pinned to False so that no merging is needed.

    Args:
        sra_id: SRA accession identifier.

    Returns:
        dictionary containing the accession and the split_files flag
    """
    app_input = {"accession": sra_id, "split_files": False}
    return app_input
bf1ca62df98932a05cb6fce476a361273f86c35e
690,645
def div(num1, num2):
    """
    Divide two numbers.

    Uses true division, so the result is a float for int inputs.

    Raises:
        ZeroDivisionError: if num2 is zero.
    """
    return num1 / num2
2d39f276196d913f6393335e5fbbdf5998a37a89
690,646
from datetime import datetime


def to_excel_ts(ts):
    """
    Converts a datetime timestamp into the Excel serial date-time format.

    Excel's epoch is 1899-12-30; the fractional part encodes the time of
    day in whole seconds (sub-second precision is dropped).
    """
    EPOCH = datetime(1899, 12, 30)
    elapsed = ts - EPOCH
    return float(elapsed.days) + float(elapsed.seconds) / 86400
7fa466aafa75254d468d4969c4c4c666abc09aaa
690,647
def get_content_id(item):
    """Extract content id or uri."""
    # Music tracks are addressed by URI; everything else by its item id.
    if item.item_class != "object.item.audioItem.musicTrack":
        return item.item_id
    return item.get_uri()
d4041481995ecea12aaaf6e0d60d82e4f31e8e1c
690,648
def getkey(dict_, key, default=None):
    """Return dict_.get(key, default)

    Thin functional wrapper around dict.get, useful where a plain callable
    is needed (e.g. as a callback). Kept as .get rather than item access so
    defaultdict inputs are not mutated.
    """
    return dict_.get(key, default)
fd874b56862e5d6094ea26b6dc30bc34c22ea496
690,650
from datetime import datetime def _to_collected_format(date): """Convert input date format from '%Y%-m-%d' to '%Y%m%d'""" return str(datetime.strptime(date, "%Y-%m-%d").strftime("%Y%m%d"))
ec9dd77f6ff58d26e3059b595f32c78ff0996c36
690,652
def _unpersist_broadcasted_np_array(broadcast): """ Unpersist a single pyspark.Broadcast variable or a list of them. :param broadcast: A single pyspark.Broadcast or list of them. """ if isinstance(broadcast, list): [b.unpersist() for b in broadcast] else: broadcast.unpersist() return None
dbd43e27db1bad87a8b86f1e9e24a40ad4bfa558
690,653
def _to_str(s): """Downgrades a unicode instance to str. Pass str through as-is.""" if isinstance(s, str): return s # This is technically incorrect, especially on Windows. In theory # sys.getfilesystemencoding() should be used to use the right 'ANSI code # page' on Windows, but that causes other problems, as the character set # is very limited. return s.encode('utf-8')
6df44a8c56bdadf767e11d5d784c83fe0bd842cc
690,655
def Intersect(list1, list2):
    """
    This function takes two lists and returns a list of items common to
    both lists (order and duplicates follow list1).
    """
    # Membership against list2 is deliberately kept list-based so that
    # unhashable elements keep working.
    return [item for item in list1 if item in list2]
f190ae7723b7cccfbd144b6df500ce8cf8e2ead2
690,657
import math


def euler_problem_9(n=1000):
    """
    A Pythagorean triplet is a set of three natural numbers, a < b < c, for
    which, a^2 + b^2 = c^2

    For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.

    There exists exactly one Pythagorean triplet for which a + b + c = 1000.
    Find the product abc.

    :param n: target perimeter a + b + c (default 1000).
    :return: a * b * c for the first triplet found, or -1 if none exists.
    """
    assert n > 10
    # first assume that a <= b < c < n/2. Then, for c to be an integer we can't have a=b.
    # hence assume that a < b < c and that c > n/3 (original comment read
    # "n/3 < 3", apparently a typo).
    # brute-force O(n^2) approach
    for c in range(n // 2, n // 3, -1):
        c_sq = c ** 2
        # b > c/sqrt(2) keeps b > a, since a = n - b - c.
        for b in range(c - 1, int(c / math.sqrt(2)), -1):
            a = n - c - b
            if a ** 2 + b ** 2 == c_sq:
                return a * b * c
    return -1
a4db0c09619d977027a45972abf092ec28062e7e
690,658
import itertools


def _get_inputs(values):
    """
    Generate the list of all possible ordered subsets of values.
    """
    # Distinct element subsets, obtained by deduplicating the cartesian
    # products of `values` with itself.
    # NOTE(review): `tuple(set(prod))` relies on set iteration order, so
    # tuple ordering (and hence deduplication) is not guaranteed stable for
    # arbitrary element types -- confirm if determinism matters here.
    power_set = set(
        [
            tuple(set(prod))
            for prod in itertools.product(values, repeat=len(values))
        ]
    )
    # Every permutation of every subset, flattened into lists.
    power_perms = [itertools.permutations(comb) for comb in power_set]
    ordered_power_set = []
    for perm in power_perms:
        for item in perm:
            ordered_power_set.append(list(item))
    return ordered_power_set
5150d08954f9867a5d7c1fca3bb4384f4e89ff59
690,659
import re


def next_look_say(current_value):
    """
    Given a numeric string, find its 'look-and-say' value to determine the
    next value in the sequence.
    """
    # Each alternative matches a maximal run of one repeated digit.
    runs_pattern = '(1+|2+|3+|4+|5+|6+|7+|8+|9+|0+)'
    pieces = []
    for run in re.findall(runs_pattern, current_value):
        pieces.append('{}{}'.format(len(run), run[0]))
    return ''.join(pieces)
338ad3c563799bc163044441a608da1491c34a0a
690,660
def set_union(sets):
    """Non variadic version of set.union.

    (fix) Starts from an empty set so that an empty iterable returns
    ``set()`` instead of raising TypeError, and the elements of `sets`
    may be any iterables, not only sets.
    """
    return set().union(*sets)
eda2b95f985e9f9fc9efa193de34008e376b1173
690,661
import importlib


def resolve_python_path(path):
    """
    Turns a python path like module.name.here:ClassName.SubClass into an object
    """
    # Split off the importable module from the attribute chain.
    module_path, attr_path = path.split(':', 1)
    target = importlib.import_module(module_path)
    # Walk the attribute segments (e.g. ClassName.SubClass) one hop at a time.
    for attr_name in attr_path.split('.'):
        target = getattr(target, attr_name)
    return target
768fa4c5b260fe1bf66dd20a8c96f36e4fb0fefe
690,665
from typing import Dict


def get_reversed_enumerated_from_dict(enumerated_dict: Dict[int, int]) -> Dict[int, int]:
    """
    Get inverse of enumerated dictionary.

    :param enumerated_dict: mapping of sorted index -> true index.
    :return: mapping of true index -> sorted index.
    """
    return {true_index: index_sorted for index_sorted, true_index in enumerated_dict.items()}
a0fbed023fae12e3d92e1aa5f6fa2327e05a44d0
690,666
def cut_below() -> str:
    """Return a marker line meaning "cut after this line"."""
    marker = "\n.--Cut after this line --.\n"
    return marker
54e8098b1c27f7c7282049e21fe965f3305cdae0
690,671
def effect_on_response(codes, effect, result):
    """
    Returns the specified effect if the resulting HTTP response code is
    in ``codes``.

    Useful for invalidating auth caches if an HTTP response is an
    auth-related error.

    :param tuple codes: integer HTTP codes
    :param effect: An Effect to perform when response code is in ``codes``.
    :param result: The result to inspect, from an Effect of :obj:`Request`.
    """
    response, content = result
    if response.code in codes:
        # Chain: run `effect`, then restore the original result as the
        # success value so downstream handlers still see the response.
        return effect.on(success=lambda ignored: result)
    else:
        return result
329f81c0880768010d19cf5905e34b524b5825b9
690,673
def format_markdown(content, params):
    """Format content with config parameters.

    Arguments:
        content {str} -- Unformatted content, possibly containing
            ``{field}`` placeholders
        params {dict} -- Mapping used to fill the placeholders

    Returns:
        {str} -- Formatted content, or the original content unchanged when
        it references a placeholder that ``params`` does not supply.
    """
    try:
        fmt = content.format(**params)
    except (KeyError, IndexError):
        # (fix) unknown named fields ({missing}) raise KeyError, but
        # positional fields ({0}) raise IndexError -- the original caught
        # only KeyError and let IndexError escape. Fall back to raw text.
        fmt = content
    return fmt
d009a3217ee1e5efdf1ee48d44b0456080595e93
690,674
def snake_to_camel(s: str):
    """
    Convert a snake_cased_name to a camelCasedName

    :param s: the snake_cased_name
    :return: camelCasedName
    """
    head, *tail = s.split("_")
    return head + "".join(part.title() for part in tail)
10ec9951a9b63835a8161a8aa8666f1873348a6e
690,675
from typing import AbstractSet def add_to_set(set_: AbstractSet[str] | None, new: str) -> set[str]: """Add an entry to a set (or create it if doesn't exist). Args: set_: The (optional) set to add an element to. new: The string to add to the set. """ return set(set_).union([new]) if set_ is not None else {new}
ec1f6e3ca51bc11ff0996a1aab00e84e2c23228e
690,677
import torch


def invert_convert_to_box_list(x: torch.Tensor, original_width: int, original_height: int) -> torch.Tensor:
    """
    takes input of shape: (*, width x height, ch)
    and returns shape: (*, ch, width, height)
    """
    assert x.shape[-2] == original_width * original_height
    # Move channels in front of the flattened spatial axis, then unflatten
    # that axis back into (width, height).
    swapped = x.transpose(dim0=-1, dim1=-2)
    target_shape = list(x.shape[:-2]) + [x.shape[-1], original_width, original_height]
    return swapped.view(target_shape)
4f98f955bb6373bf358108fdf237aa109afae436
690,686
import re


def convert_crfpp_output(crfpp_output):
    """
    Convert CRF++ command line output.

    This function takes the command line output of CRF++ and splits it into
    one [gold_label, pred_label] list per word per sentence.

    Parameters
    ----------
    crfpp_output : str
        Command line output obtained from a CRF++ command.

    Returns
    -------
    result : list
        List of [gold_label, pred_label] per word per sentence.
    """
    # Sentences are separated by one or more blank lines.
    converted = []
    for sentence_output in re.split(r'\n\n+', crfpp_output.strip()):
        token_rows = []
        for token_output in re.split(r'\n', sentence_output):
            # Gold and predicted labels are the last two tab-separated columns.
            token_rows.append(re.split(r'\t', token_output)[-2:])
        converted.append(token_rows)
    return converted
dbb23736516755706a3910fea86a2cfdf76771e0
690,689
import math


def normalize_values_in_dict(dictionary, factor=None, inplace=True):
    """
    Normalize the values in a dictionary using the given factor.

    For each element in the dictionary, applies ``value/factor``.

    Parameters
    ----------
    dictionary: dict
        Dictionary to normalize.
    factor: float, optional (default=None)
        Normalization factor value. If not set, use the sum of values.
    inplace : bool, default True
        if True, perform operation in-place
    """
    scale = sum(dictionary.values()) if factor is None else factor
    if scale == 0:
        raise ValueError('Can not normalize, normalization factor is zero')
    if math.isnan(scale):
        raise ValueError('Can not normalize, normalization factor is NaN')
    target = dictionary if inplace else dictionary.copy()
    # Only values change; rebinding existing keys while iterating is safe.
    for key in target:
        target[key] = target[key] / scale
    return target
109d30d3661cae45c6a4983bd7cc66ff0dcfbdf3
690,692
import json


def load_times(filename="cvt_add_times.json"):
    """Loads the benchmark results from the given JSON file.

    Returns the (n_bins, brute_force_t, kd_tree_t) fields as a tuple.
    """
    with open(filename) as handle:
        data = json.load(handle)
    return data["n_bins"], data["brute_force_t"], data["kd_tree_t"]
1fc27348ed2028a6ebdcb13b488d98614a28c83f
690,696
def add_leading_zero(number: int, digit_num: int = 2) -> str:
    """Zero-pad *number* to at least *digit_num* characters.

    Args:
        number (int): number that you want to add leading zero
        digit_num (int): number of digits that you want fill up to. Defaults to 2.

    Returns:
        str: number that has the leading zero

    Examples:
        >>> add_leading_zero(5, 3)
        "005"
    """
    as_text = str(number)
    return as_text.zfill(digit_num)
8db9bbb762e33510de896c09917b0d832e81f7de
690,698
import re def _find_streams(text): """Finds data streams in text, returns a list of strings containing the stream contents""" re_stream = re.compile(r"<< /Length \d+ >>\n(stream.*?endstream)", re.DOTALL) streams = [] for m in re_stream.finditer(text): streams.append(text[m.start(1):m.end(1)]) return streams
37f011276d4ca2eeeb03927910b2d494519cd17e
690,701
import torch def _create_1d_regression_dataset(n: int = 100, seed: int = 0) -> torch.Tensor: """Creates a simple 1-D dataset of a noisy linear function. :param n: The number of datapoints to generate, defaults to 100 :param seed: Random number generator seed, defaults to 0 :return: A tensor that contains X values in [:, 0] and Y values in [:, 1] """ torch.manual_seed(seed) x = torch.rand((n, 1)) * 10 y = 0.2 * x + 0.1 * torch.randn(x.size()) xy = torch.cat((x, y), dim=1) return xy
1534c7a968dfb3663c1f4d953e3088225af54b5f
690,702
def decrease_parameter_closer_to_value(old_value, target_value, coverage):
    """
    Simple but commonly used calculation for interventions. Acts to decrement
    from the original or baseline value closer to the target or intervention
    value according to the coverage of the intervention being implemented.

    Args:
        old_value: Baseline or original value to be decremented
        target_value: Target value or value at full intervention coverage
        coverage: Intervention coverage or proportion of the intervention
            value to apply
    """
    if old_value <= target_value:
        # Already at or below the target: nothing to decrement.
        return old_value
    return old_value - (old_value - target_value) * coverage
4f22c90fae1c69801ff4c89c1ed34ca1362dc92f
690,703
def remove_subtitle(title):
    """Strip a book's subtitle (if it exists).

    For example, 'A Book: Why Not?' becomes 'A Book'."""
    main_title, sep, _ = title.partition(':')
    # No ':' -> return the title untouched (matching the original, which
    # only strips whitespace when a subtitle was removed).
    return main_title.strip() if sep else title
cb223aa57a1eae2ab326a86bd7145bc345330800
690,706
import torch


def get_samples_from_datasets(datasets, wav):
    """Gets samples (noise or speech) from the datasets.

    Arguments
    ---------
    datasets : list
        List containing datasets. More precisely, we expect here the
        pointers to the object used in speechbrain for data augmentation
        (e.g, speechbrain.lobes.augment.EnvCorrupt). Each entry is called
        as ``dataset(wav_sample, len_sample)`` and must return a tensor
        of the same shape as ``wav``.
    wav : torch.tensor
        The original waveform. The drawn samples will have the same
        dimensionality of the original waveform.
        NOTE(review): the indexing below assumes wav is 2-D
        (batch, time) — confirm with callers.

    Returns
    -------
    samples: torch.tensor
        A batch of new samples drawn from the input list of datasets,
        shaped (batch, time, len(datasets)) with the dataset axis
        randomly permuted.
    """
    # We want a sample of the same size of the original signal
    samples = torch.zeros(
        wav.shape[0], wav.shape[1], len(datasets), device=wav.device
    )

    # Let's sample a sequence from each dataset
    for i, dataset in enumerate(datasets):

        # Initialize the signal with noise (uniform on [-1, 1)), so each
        # dataset transforms noise rather than the input waveform.
        wav_sample = (torch.rand_like(wav) * 2) - 1
        # Relative lengths are all 1.0 (full-length sequences).
        len_sample = torch.ones(wav.shape[0], device=wav.device)

        # Sample a sequence
        wav_sample = dataset(wav_sample, len_sample)

        # Append it
        samples[:, :, i] = wav_sample

    # Random permutations of the signal along the dataset axis, so the
    # order of the drawn samples does not depend on the dataset order.
    idx = torch.randperm(samples.shape[-1], device=wav.device)
    samples[:, :] = samples[:, :, idx]
    return samples
490d780bafd514bb2d1b7b03c650e977fd3586eb
690,708
import math import torch def _get_log_freq(sample_rate, max_sweep_rate, offset): """Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2] offset is used to avoid negative infinity `log(offset + x)`. """ half = sample_rate // 2 start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2) return torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset
739593efa9ef15809bacddf495502d92625c002f
690,711
def identifier_path(items):
    """Convert an identifier given as a list/tuple of path components into
    its string filesystem-path representation.

    We assume that no symbols forbidden by the filesystem are used in
    identifiers.
    """
    components = list(items)
    return "/".join(components)
9835ffe136008b53c37f074311d045c4aaec64ce
690,715
from pathlib import Path
from typing import Iterable


def find_pictures(folder: Path, pattern: str = "*.jpg") -> Iterable[Path]:
    """Find picture files in *folder* (non-recursive).

    Args:
        folder: Directory to search.
        pattern: Glob pattern to match; defaults to ``"*.jpg"``, which
            preserves the original behavior.

    Returns:
        An iterable of matching paths.
    """
    # Generalized: the extension is now a parameter instead of being
    # hard-coded to JPEG files.
    return folder.glob(pattern)
754591c5b72aeb7c74f2c0024273efc39c1a56a9
690,716
def get_result_from_payload(json_resp):
    """Try to get the 'result' node from the payload.

    The payload must be a non-None mapping that contains a 'result' key;
    both preconditions are enforced by assertions before the lookup.
    """
    assert json_resp is not None
    assert 'result' in json_resp
    # The assertions above guarantee the key exists, so a plain subscript
    # lookup cannot fail here.
    return json_resp['result']
867ed5db5ec4759a78f47d04cb75c39185b04c02
690,724
import socket


def is_port_used(ip, port, timeout=None):
    """Check whether (ip, port) is accepting TCP connections.

    :param ip: host address to probe
    :param port: TCP port number
    :param timeout: optional connect timeout in seconds; ``None`` (the
        default, matching the original behavior) blocks until the OS
        gives up
    :return: True (in use) / False (idle)
    """
    # The context manager guarantees the socket is closed even when
    # connect() raises, replacing the original try/finally.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        if timeout is not None:
            probe.settimeout(timeout)
        try:
            probe.connect((ip, port))
        except OSError:
            return False
        return True
56fbcf03ab0bbfb23fdea56df8eeda19adaac539
690,726
import re


def check_indent_keys(document):
    """Check whether the word before the cursor was an indent key (which
    will trigger a reevaluation of the line's indent).

    Returns a regex match object when the token immediately before the
    cursor is one of the indent keys, otherwise None.
    """
    word_start = document.find_previous_word_beginning() or 0
    word_end = 0  # was: document.find_previous_word_ending() or 0
    cursor_col = document.cursor_position_col
    # Slice out the token between the previous word start and the cursor.
    previous_token = document.current_line[cursor_col + word_start:cursor_col + word_end]
    return re.match(
        r'\{|\}|\(|\)|\[|\]|,|where|let|deriving|in|::|->|=>|\||=',
        previous_token)
05b5191582739693c53a532fc0f1f979bd70c795
690,728
def get_syscall_name(event):
    """Get the name of a syscall from an event.

    Args:
        event (Event): an instance of a babeltrace Event for a syscall
            entry.

    Returns:
        The name of the syscall, stripped of any superfluous prefix.

    Raises:
        ValueError: if the event is not a syscall event.
    """
    name = event.name
    # Check the known prefixes in the same order as before; 'sys_' does
    # not match 'syscall_entry_*' names ('sysc' != 'sys_').
    for prefix in ('sys_', 'syscall_entry_'):
        if name.startswith(prefix):
            return name[len(prefix):]
    raise ValueError('Not a syscall event')
689d25c9731c3725a501b135d657731a46808da8
690,732
def in_all_repository_dependencies( repository_key, repository_dependency, all_repository_dependencies ):
    """Return True if { repository_key : repository_dependency } is in all_repository_dependencies.

    Args:
        repository_key: Key identifying the repository.
        repository_dependency: Dependency entry to look for.
        all_repository_dependencies: Mapping from repository keys to
            containers of dependency entries.

    Returns:
        True if the dependency is recorded under the key, else False.
    """
    # Direct dict lookup replaces the original O(n) scan over .items()
    # comparing every key; dict keys are unique, so behavior is the same.
    try:
        dependencies = all_repository_dependencies[repository_key]
    except KeyError:
        return False
    return repository_dependency in dependencies
290ab8606d2b95297f3a6ecd16fb9604a79381ef
690,733
from typing import Dict


def close(fulfillment_state: str, message: Dict[str, str]) -> dict:
    """Close dialog generator.

    Builds the Lex 'Close' dialog action payload from the given
    fulfillment state and message.
    """
    dialog_action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': message,
    }
    return {'dialogAction': dialog_action}
84a49d7dcb732a9e067b7f282230b45de486a899
690,736
def customer_table(data):
    """Return a dataframe with all customers.

    Selects only the ``customer_unique_id`` column from the
    ``olist_customers_dataset`` table and resets the index.
    """
    source = data['olist_customers_dataset']
    # Select first, then copy: cheaper than copying the full table, with
    # an identical result.
    customers = source[['customer_unique_id']].copy()
    return customers.reset_index(drop=True)
d353cfd455848ba0244e2b84c96d3857c566c9c2
690,738
def _dummy_boxstream(stream, **kwargs):
    """Identity boxstream: return *stream* unchanged (no transformation).

    Extra keyword arguments are accepted for interface compatibility
    with real boxstream factories and are ignored.
    """
    return stream
7ff65f1860c2e18149c496e135c28f43ccc7a980
690,744
import random


def random_hex(digits = 12):
    """Generate a string of random hexadecimal digits and return it.

    Arguments:
        digits: the number of hexadecimal digits to create

    Returns:
        A string of length *digits* drawn from 0-9A-F.

    Note:
        Uses the ``random`` module, which is not cryptographically
        secure; prefer ``secrets.token_hex`` for security tokens.
    """
    # random.choices draws all digits in one call, replacing the
    # original redundant join-of-join around per-character choice().
    return ''.join(random.choices("0123456789ABCDEF", k=digits))
b571013b8e17a08dc35b14bece6bde24f9829813
690,746
def _compute_min_event_ndims(bijector_list, compute_forward=True):
    """Computes the min_event_ndims associated with the given list of bijectors.

    Given a list `bijector_list` of bijectors, compute the min_event_ndims that is
    associated with the composition of bijectors in that list.

    min_event_ndims is the # of right most dimensions for which the bijector has
    done necessary computation on (i.e. the non-broadcastable part of the
    computation).

    We can derive the min_event_ndims for a chain of bijectors as follows:

    In the case where there are no rank changing bijectors, this will simply be
    `max(b.forward_min_event_ndims for b in bijector_list)`. This is because the
    bijector with the most forward_min_event_ndims requires the most dimensions,
    and hence the chain also requires operating on those dimensions.

    However in the case of rank changing, more care is needed in determining the
    exact amount of dimensions. Padding dimensions causes subsequent bijectors to
    operate on the padded dimensions, and Removing dimensions causes bijectors to
    operate more left.

    Args:
      bijector_list: List of bijectors to be composed by chain.
      compute_forward: Boolean. If True, computes the min_event_ndims associated
        with a forward call to Chain, and otherwise computes the min_event_ndims
        associated with an inverse call to Chain. The latter is the same as the
        min_event_ndims associated with a forward call to Invert(Chain(....)).

    Returns:
      min_event_ndims
    """
    min_event_ndims = 0
    # This is a mouthful, but what this encapsulates is that if not for rank
    # changing bijectors, we'd only need to compute the largest of the min
    # required ndims. Hence "max_min". Due to rank changing bijectors, we need to
    # account for synthetic rank growth / synthetic rank decrease from a rank
    # changing bijector.
    rank_changed_adjusted_max_min_event_ndims = 0

    # NOTE(review): a forward Chain call applies the list right-to-left, so we
    # iterate reversed to walk the bijectors in execution order — confirm
    # against the Chain implementation.
    if compute_forward:
        bijector_list = reversed(bijector_list)

    for b in bijector_list:
        if compute_forward:
            current_min_event_ndims = b.forward_min_event_ndims
            current_inverse_min_event_ndims = b.inverse_min_event_ndims
        else:
            current_min_event_ndims = b.inverse_min_event_ndims
            current_inverse_min_event_ndims = b.forward_min_event_ndims

        # New dimensions were touched.
        if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims:
            min_event_ndims += (
                current_min_event_ndims - rank_changed_adjusted_max_min_event_ndims)
        rank_changed_adjusted_max_min_event_ndims = max(
            current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims)

        # If the number of dimensions has increased via forward, then
        # inverse_min_event_ndims > forward_min_event_ndims, and hence the
        # dimensions we computed on, have moved left (so we have operated
        # on additional dimensions).
        # Conversely, if the number of dimensions has decreased via forward,
        # then we have inverse_min_event_ndims < forward_min_event_ndims,
        # and so we will have operated on fewer right most dimensions.
        number_of_changed_dimensions = (
            current_min_event_ndims - current_inverse_min_event_ndims)
        rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions
    return min_event_ndims
a168b64107ae4fb5e9c68ea679f8c786666d6db9
690,748
def is_anonymous(user_id):
    """
    Returns whether or not the given user is an anonymous user.

    :param user_id: The id of the user.
    :return: True, if the user is anonymous; False, otherwise.
    """
    # Anonymous users carry a reserved temporary-id prefix.
    prefix = "hmrtmp"
    return user_id[:len(prefix)] == prefix
c6d6620cb0626967518a0f7706ff4a4895e00167
690,749
def calc_flesch_readability(wordcount, sentcount, syllcount):
    """Calculate the Flesch Reading Ease score.

    Args:
        wordcount: Total number of words (must be non-zero).
        sentcount: Total number of sentences (must be non-zero).
        syllcount: Total number of syllables.

    Returns:
        The readability score rounded to one decimal place.

    Raises:
        ZeroDivisionError: if sentcount or wordcount is zero.
    """
    # Python 3 division already yields floats; the original's five nested
    # float() casts were redundant noise that obscured the formula.
    words_per_sentence = wordcount / sentcount
    syllables_per_word = syllcount / wordcount
    return round(206.835 - 1.015 * words_per_sentence - 84.6 * syllables_per_word, 1)
faaaa6c315d905a7ea6f485c4720f8695897be79
690,752
def centers(signal, axes):
    """
    Returns the centers of the axes.

    This works regardless if the axes contain bin boundaries or centers.
    """
    def _axis_centers(axis, dim_length):
        # Bin boundaries have one more entry than the data dimension;
        # average neighbouring boundaries to obtain the centers.
        if axis.shape[0] == dim_length + 1:
            return (axis.nxdata[:-1] + axis.nxdata[1:]) / 2
        assert axis.shape[0] == dim_length
        return axis.nxdata

    return [
        _axis_centers(axis, signal.shape[dim])
        for dim, axis in enumerate(axes)
    ]
cf39dd22a322d5c759cfd219c5097b54f05a0d86
690,753
def signing_bytes(uid, nonce):
    """Build the UTF-8 encoded signing payload.

    Parameters:
        uid: string
        nonce: int

    Returns:
        bytes: the concatenation of uid and nonce, UTF-8 encoded.
    """
    payload = '{}{}'.format(uid, nonce)
    return payload.encode('utf-8')
3ae18cabd5cd1deb83385262cc58b1a18e936b79
690,754
def stdDevOfLengths(L):
    """
    L: a list of strings

    returns: float, the standard deviation of the lengths of the strings,
             or NaN if L is empty.
    """
    # Explicit empty check replaces the original bare ``except:``, which
    # silently converted *any* error (not just empty input) into NaN.
    if not L:
        return float('NaN')
    lengths = [len(s) for s in L]
    mean = sum(lengths) / len(lengths)
    variance = sum((x - mean) ** 2 for x in lengths) / len(lengths)
    # Population standard deviation (divide by N, not N-1), matching the
    # original computation.
    return variance ** 0.5
04e68523d89bfe9b9e3ba9298f817bc0f374ba83
690,759
def _get_date(msg):
    """Returns the date included into the message.

    Args:
        msg: A json message (dict-like); must contain a 'date' key.

    Returns:
        The date string

    Raises:
        KeyError: if the message has no 'date' entry.
    """
    return msg['date']
b9eb3f2cf62e80c1bf3274a50aef8604355e81f7
690,760
def convert_dict_of_sets_to_dict_of_lists(dictionary):
    """
    Returns the same dictionary, but the values being sets are now lists

    @param dictionary: {key: set(), ...}
    """
    # BUG FIX: the original wrote ``for key, setvalue in dictionary``,
    # which iterates the *keys* and tries to unpack each one (raising
    # TypeError/ValueError for typical keys). Iterate .items() instead.
    return {key: list(setvalue) for key, setvalue in dictionary.items()}
fe45c19684356be8d1bd46cda366dc8ba0a33922
690,761
def remove_every_other(my_list: list) -> list:
    """
    Return a new list with every second element removed (i.e. keep only
    the elements at even indices).
    """
    # A step-2 slice replaces the original enumerate-over-range loop,
    # whose two counters (i and k) were always identical.
    return my_list[::2]
1c23a204dec2fd6f888cbbe9b968e4ae72d4cfe9
690,762
def tick_percent(decimals=1):
    """A tick formatter to display the y-axis as a float percentage with a
    given number of decimals.

    Args:
        decimals = 1: The number of decimals to display.

    Returns:
        A tick formatter function (f(y, position)) displaying y as a
        percentage.
    """
    def _formatter(y, position):
        # position is required by the formatter protocol but unused.
        return '{:.{decimals}f}%'.format(100.0 * y, decimals=decimals)

    return _formatter
b9c064b767e39b4a98abd389ef1d0656dfea582a
690,765
def cereal_protein_fractions(cereals):
    """
    For each cereal, records its protein content as a fraction of its
    total mass.
    """
    GRAMS_PER_OUNCE = 28.35
    return {
        cereal["name"]:
            float(cereal["protein"]) / (float(cereal["weight"]) * GRAMS_PER_OUNCE)
        for cereal in cereals
    }
af2ce290e31aac2dc44856bd5de9a7017b225287
690,766
from typing import List
from typing import Dict


def flatten(l: List[List[Dict]]) -> List[Dict]:
    """
    Flattens a list of lists into a single list.

    :param l: list containing lists of table dictionaries
    :return: list containing table dictionaries
    """
    flattened: List[Dict] = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
95a11b937a547303790c54e0c4bfdafe286f7bf6
690,768
def button_string(channel, red, blue):
    """Returns the string representation of a Combo PWM Mode button."""
    # The :s conversions keep the original requirement that all three
    # arguments are strings.
    return f'CH{channel:s}_{red:s}_{blue:s}'
200856405bdee5cdaaa0933cd2c4277092e23d23
690,773
def get_msg_count(bag, topic):
    """Get number of message instances for the topic.

    Thin wrapper around ``rosbag.Bag.get_message_count``.

    # Parameters
    bag : rosbag.Bag
        a target rosbag
    topic : string
        a valid topic name

    # Returns
    num_msgs : int
        number of messages in the topic
    """
    return bag.get_message_count(topic)
3da6964fa77dfbe982e090e4a13fcb778fd032a1
690,777
def bilinear_interpolation_01(x, y, values):
    """Interpolate values given at the corners of the [0,1]x[0,1] square.

    Parameters:
        x : float
        y : float
        values : ((v00, v01), (v10, v11))
            input grid with 4 values from which to interpolate.
            Inner dimension = x, thus v01 = value at (x=1, y=0).

    Returns:
        float
            interpolated value
    """
    # Unpack the four corner values by name, then take the standard
    # bilinear weighted sum (weights are the opposite-corner areas).
    (v00, v01), (v10, v11) = values
    return (v00 * (1 - x) * (1 - y)
            + v01 * x * (1 - y)
            + v10 * (1 - x) * y
            + v11 * x * y)
a5e0d8b974803073df159da4d16a01a47ec0f087
690,778