content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
from typing import Dict


async def test_http() -> Dict[str, bool]:
    """Simple fixed success response for HTTP test requests."""
    response = {"success": True}
    return response
1ccc54c9e17cdfa6f48d21231f7d38b480b76df6
692,779
def reverse(arr):
    """
    Return the given sequence reversed.
    Time Complexity : O(n)
    Space Complexity : O(1)
    """
    # slice object is equivalent to arr[::-1] and preserves the input type
    step_back = slice(None, None, -1)
    return arr[step_back]
687721b15de3ec94f54443f3b7bf55022d561382
692,781
def countSolutionsLogfile(logfile_path):
    """
    Count the number of solutions in a CryptoMiniSat Logfile.

    :param logfile_path: path to the log file
    :return: number of lines containing "s SATISFIABLE"
    """
    with open(logfile_path, "r") as logfile:
        # Each satisfiable answer is reported on its own "s SATISFIABLE"
        # line; the original's control flow was ambiguous (an early return
        # could stop at the first hit, and `return -1` was unreachable).
        return sum(1 for line in logfile if "s SATISFIABLE" in line)
83a054be17b829aba11d9340a0873f54da275988
692,785
def get_case_id(line):
    """
    Returns the case id (the 11th comma-separated field) for a csv line.

    :param str line: The csv line in question
    :return str: The case_id string, or "" (with the error printed) on failure
    """
    try:
        fields = line.strip().split(",")
        return fields[10]
    except Exception as err:
        print("Error: " + str(err))
        return ""
ab9f1e0bd4e45565ff54302bdb17361c68e27d08
692,788
def _get_parameters_string(**kwargs): """Used to create identifiers for output""" _id = "" if kwargs: _id = "_" + ''.join('{}={}_'.format(key, val) for key, val in sorted(kwargs.items()))[:-1] return _id
553019b82ece4275ed300c926a4cfb95b6d27f9b
692,789
def same_types(obj1, obj2):
    """
    Recursively check that obj1 and obj2 are of the same types. Better than
    type(obj1) == type(obj2) because it recursively checks inside lists,
    sets, dicts, and tuples
    """
    t = type(obj1)
    # Exact type identity is required; subclasses count as different types.
    if t is not type(obj2):
        return False
    if t in {list, set, dict}:
        # For dicts check both keys (iterating a dict yields its keys) and
        # values; for lists/sets only the elements themselves.
        for iterables in ([(obj1, obj2), (obj1.values(), obj2.values())] if t is dict else [(obj1, obj2)]):
            # Flatten both containers and require every element to share the
            # type of the first element (i.e. homogeneous containers).
            lst = [i for o in iterables for i in o]
            if not all(same_types(lst[0], o) for o in lst[1:]):
                return False
    if t is tuple:
        # Tuples are compared positionally and must have equal length.
        return len(obj1) == len(obj2) and all(same_types(o1, o2) for o1, o2 in zip(obj1, obj2))
    return True
0c95c45ab01c950b40ecb009cc623213afecdbf1
692,792
def getIpIntStr(ipInt):
    """
    Converts an IP address in host order integer to a string representation.

    :param ipInt: an IP address integer
    :rtype: str
    :return: A string representation of the IP address
    """
    octets = [str((ipInt >> shift) & 0xFF) for shift in (0, 8, 16, 24)]
    return ".".join(octets)
c833d0946524cde93aadb2a6b721a17e9c00ab2c
692,795
def upload_image_to(instance, filename):
    """
    Custom path for saving images.

    Args:
        instance: model instance; only ``instance.title`` is read.
        filename: original name of the uploaded file.

    Returns:
        str: image path of the form ``article/<title>/images/<filename>``.
    """
    # Bug fix: the original never used the ``filename`` argument and ended
    # the path with a corrupted literal, so every upload mapped to the same
    # path.
    asset_path = f'article/{str(instance.title)}/images/{filename}'
    return asset_path
5dcdf6e5d80cc67678e0cfae990885c9e68d6733
692,796
def hasspec(value, specs):
    """Check whether any of the keys are in a dict."""
    return any(spec in value for spec in specs)
15866fc140d394169d5a7f1932977031a7cd6832
692,797
def ipkg_meta_from_pkg(pkg):
    """Return meta dict for Installed pkg from a PackageDescription instance."""
    fields = ("name", "version", "summary", "url", "author",
              "author_email", "license", "download_url", "description",
              "platforms", "classifiers", "install_requires", "top_levels")
    return {field: getattr(pkg, field) for field in fields}
7c73546854fe022005bb7cd65711d850fc744645
692,799
def getDictFromTuple(values: tuple, keys: list, includeNone: bool = True):
    """Map positional ``values`` onto ``keys`` and return the resulting dict.

    For instance, values=(1, "bill", 5) and keys=["id", "name", "age"]
    returns {"id": 1, "name": "bill", "age": 5}.  When ``includeNone`` is
    False, entries whose value is None are skipped.
    """
    result = {}
    for index, value in enumerate(values):
        if includeNone or value is not None:
            result[keys[index]] = value
    return result
b4a182ee561d2640004aa57b6c75f669af9261b3
692,801
def prune_existing_records(db, records_to_be_saved):
    """
    Return a list of records which are not already present in the db from the input list
    """
    ok_records = []
    # All distinct fetch dates present among the candidate records.
    fetch_dates = set([rec['fetch_date'] for rec in records_to_be_saved])
    pre_existing = set()
    for fd in fetch_dates:
        # Distinct ASX codes already stored for this fetch date.
        # NOTE(review): assumes a pymongo-style `collection.distinct(field,
        # filter)` API on db.asx_prices — confirm against the caller.
        stocks = db.asx_prices.distinct('asx_code', {'fetch_date': fd})
        for stock in stocks:
            # Composite "code-date" key uniquely identifies a record.
            pre_existing.add("{}-{}".format(stock, fd))
    for rec in records_to_be_saved:
        key = "{}-{}".format(rec['asx_code'], rec['fetch_date'])
        if key not in pre_existing:
            ok_records.append(rec)
    return ok_records
d7c05b2d98701a84a041b0a84c802db75a53841b
692,802
import pathlib
import venv


def create_venv(lib_name: str, py_version: str) -> pathlib.Path:
    """
    Creates the new virtual environment.

    :param lib_name: name of library
    :param py_version: string representation of two-digit python version (ie 37)
    :return: path to venv
    :raises FileExistsError: if the venv directory already exists
    """
    venv_name = f"{lib_name}-go-{py_version}"
    venv_path = pathlib.Path(f"~/venvs/{venv_name}").expanduser()
    # exist_ok=False makes mkdir raise FileExistsError on a pre-existing
    # venv; the original caught that error only to re-raise it unchanged,
    # so the try/except was removed and the error simply propagates.
    venv_path.mkdir(parents=True, exist_ok=False)
    venv.create(env_dir=str(venv_path),
                with_pip=True,
                system_site_packages=True)
    return venv_path
c1cc853f121011805b801d35aa272c6b4477a8dc
692,807
def _get_fpath_to_parsed_file_map(parsed_files): """Creates a map: filepath -> Parser from the given list of Parser """ fpath_to_file = dict() for f in parsed_files: fpath_to_file[f.filepath] = f return fpath_to_file
5ecc68ba5d9918ef4171abc94de87d56b2af8e59
692,808
def to_kebab(value: str) -> str:
    """
    snake_case to kebab-case.

    :param value: the snake_case input
    :return: the same string with underscores replaced by hyphens
    """
    # The original wrapped this in try/except only to re-raise a bare
    # Exception, discarding the real error type; let errors propagate as-is.
    return value.replace('_', '-')
42b18fa6ec2d483a5c12064016190d500837b6fc
692,809
def select_all(_):
    """
    Predicate that accepts every particle unconditionally.
    """
    always = True
    return always
34e277c1ae59a9032e5d09e45cf27732185d9c49
692,810
def append_slash(url, append=True):
    """Append a slash to a URL, checking if it already has one.

    With ``append=False`` any trailing slash is removed instead.
    """
    has_slash = url.endswith("/")
    if append:
        return url if has_slash else url + "/"
    return url[:-1] if has_slash else url
d3bcb71674fca2e984b9c9104bfab70c434ce324
692,812
import sqlite3
from typing import Optional


def execute_query(
        connection: sqlite3.Connection,
        query: str,
        args: Optional[dict] = None
) -> list:
    """Given sqlite3.Connection and a string query (and optionally necessary
    query args as a dict), attempt to execute query with cursor, commit the
    transaction, and return fetched rows.

    :param connection: open sqlite3 connection
    :param query: SQL statement, optionally with named placeholders
    :param args: parameters for the placeholders, if any
    :return: all rows produced by the statement
    """
    cur = connection.cursor()
    try:
        if args is not None:
            cur.execute(query, args)
        else:
            cur.execute(query)
        connection.commit()
        results = cur.fetchall()
    finally:
        # Bug fix: the cursor used to leak when execute()/commit() raised.
        cur.close()
    return results
d0f89247281d672cd74ffcd71fa6c401064512d8
692,815
import fnmatch


def matchPattern(string, pattern):
    """Shell-style wildcard match of ``string`` against ``pattern``.

    > matchPattern("nameTest1", "nameTest")
    False
    > matchPattern("nameTest1", "nameTest*")
    True
    """
    matches = fnmatch.fnmatch(string, pattern)
    return matches
ff8bf4ee28af701139e9e4b900171338c6a354d1
692,816
def is_scalar(value):
    """Checks if the supplied value can be converted to a scalar."""
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
3e19932bdce589bee65947096f594dc856ed22e7
692,819
import torch
from typing import Optional


def alpha_blending(
        foreground: torch.Tensor,
        background: Optional[torch.Tensor] = None
) -> torch.Tensor:
    """
    Alpha-blend an RGBA foreground over an (optional) RGBA background.

    Here you can find all formulas
    https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
    http://web.cse.ohio-state.edu/~parent.1/classes/581/Lectures/13.TransparencyHandout.pdf

    Arguments:
        foreground (torch.Tensor): foreground.shape == [H * W, 4],
            where 4 is decomposed to RGBA
        background (Optional[torch.Tensor]): same as foreground;
            ``None`` means a fully opaque white background.

    Outputs:
        output (torch.Tensor): output.shape == [H * W, 3],
            where 3 is decomposed to RGB
    """
    # We assume that the first 3 channels are RGB and the 4th is alpha.
    foreground_rgb, foreground_alpha = foreground.split([3, 1], dim=-1)

    # In this case we suggest that background is white and fully opaque
    # and thus each pixel has color (1.0, 1.0, 1.0) and 1.0 alpha.
    if background is None:
        return foreground_rgb * foreground_alpha + (1 - foreground_alpha)

    # Bug fix: the background channels were previously split off the
    # *foreground* tensor, so the supplied background was silently ignored.
    background_rgb, background_alpha = background.split([3, 1], dim=-1)
    image = foreground_rgb * foreground_alpha \
        + background_rgb * background_alpha * (1 - foreground_alpha)
    image /= foreground_alpha + background_alpha * (1 - foreground_alpha)
    return image
8b22aeee804b6ca8e3df13aecd6a48e98b7836dc
692,820
import six


def force_unicode(s, encoding='utf-8'):
    """Convert a given string into a Unicode (decoded) string if it isn't
    already.

    Args:
        s (:obj:`str`): String object to convert.
        encoding (:obj:`str`, optional): The encoding of **s** if it is
            encoded. Defaults to 'utf-8'.

    Returns:
        :obj:`str`: A Unicode (decoded) version of **s**.
    """
    # Already-decoded text (or None) passes through untouched.
    already_text = s is None or isinstance(s, six.text_type)
    return s if already_text else s.decode(encoding)
8a14522150d6a184006c528369f86f716438d46a
692,828
from dateutil.parser import parse


def timestamp(date_time):
    """Convert a date string to number of seconds since 1 Jan 1970 00:00 UTC

    date: e.g. "2016-01-27 12:24:06.302724692-08"
    """
    epoch = parse("1970-01-01 00:00:00+0000")
    delta = parse(date_time) - epoch
    return delta.total_seconds()
a736d92f09325252639c0505a894550dd55121f9
692,829
import math


def polysum(n: int, s: int) -> float:
    """
    Computes the sum of the area + perimeter squared of a regular polygon.

    The area of a regular polygon is: (0.25 * n * s**2) / tan(pi / n)
    The perimeter of a polygon is: length of the boundary of the polygon
    ---------------------------------------------------------
    Input:
        n : int — the number of sides of the polygon
        s : int — the length of each side of the polygon
    ---------------------------------------------------------
    Returns : float, rounded to 4 decimal places
    """
    perimeter = n * s
    area = 0.25 * n * s ** 2 / math.tan(math.pi / n)
    return round(area + perimeter ** 2, 4)
b481a17be80075b417748986ae9de892541d335c
692,830
def isalnum(text):
    """
    Checks if all characters in ``text`` are alphanumeric and there is at
    least one character.

    A character c is alphanumeric if one of the following returns True:
    :func:`isalpha`, :func:`isdecimal`, :func:`isdigit`, or :func:`isnumeric`.

    :param text: The string to check
    :type text: ``str``

    :return: True if all characters in ``text`` are alphanumeric and there
        is at least one character, False otherwise.
    :rtype: ``bool``
    """
    is_string = isinstance(text, str)
    assert is_string, '%s is not a string' % text
    return text.isalnum()
bf63dc89522398e8c8a4d91b39dbdb37d61edc28
692,831
def match_application_commands_to_commands(application_commands, commands, match_schema):
    """
    Matches the given application commands to slash commands.

    Parameters
    ----------
    application_commands : `list` of ``ApplicationCommand``
        Received application commands.
    commands : `None` or `list` of ``SlashCommand``
        A list of slash commands if any.
    match_schema : `bool`
        Whether schema or just name should be matched.

    Returns
    -------
    commands : `None` or `list` of ``SlashCommand``
        The remaining matched commands.
    matched : `None` or `list` of `tuple` (``ApplicationCommand``, ``SlashCommand``)
        The matched commands in pairs.
    """
    matched = None
    if (commands is not None):
        # Iterate both lists backwards so matched entries can be deleted
        # in place without disturbing the indices still to be visited.
        for application_command_index in reversed(range(len(application_commands))):
            application_command = application_commands[application_command_index]
            application_command_name = application_command.name
            for command_index in reversed(range(len(commands))):
                command = commands[command_index]
                # Names must always agree.
                if command.name != application_command_name:
                    continue
                # Optionally the full schema must agree as well.
                if match_schema:
                    if (command.get_schema() != application_command):
                        continue
                # Remove the pair from both input lists and record it.
                del application_commands[application_command_index]
                del commands[command_index]
                if matched is None:
                    matched = []
                matched.append((application_command, command))
        # Normalise an emptied list back to None.
        if not commands:
            commands = None
    return commands, matched
fe5e97bf3e3560e5fbb80161ce171d3041e9cd88
692,832
def check_old_policy(policy):
    """
    Checks the validity of a single policy using the rules from part 1 of
    day 2: the letter's occurrence count must lie within [low, high].
    """
    occurrences = policy["passwd"].count(policy["letter"])
    result = dict(policy)
    result["valid"] = policy["low"] <= occurrences <= policy["high"]
    return result
07d755be3ad71b342381d6d611a33325eecd3fd6
692,836
def figure_image_adjustment(fig, img_size):
    """ adjust figure as nice image without axis

    :param fig: matplotlib figure, adjusted in place
    :param (int, int) img_size: image (height, width) in pixels
    :return: the same figure instance

    >>> fig = figure_image_adjustment(plt.figure(), (150, 200))
    >>> isinstance(fig, matplotlib.figure.Figure)
    True
    """
    ax = fig.gca()
    # Match the axes limits to the image extent; the y-axis is inverted so
    # row 0 sits at the top, as with imshow.
    ax.set_xlim([0, img_size[1]])
    ax.set_ylim([img_size[0], 0])
    ax.axis('off')
    ax.axes.get_xaxis().set_ticklabels([])
    ax.axes.get_yaxis().set_ticklabels([])
    # Remove all padding so the axes fill the whole figure canvas.
    fig.tight_layout(pad=0)
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    return fig
e026242ea1ef7cd760645a6cbec8e7b414e31d8b
692,839
import logging


def replaceHTMLBlock(html, commentIdentifier, newContent):
    """
    Replaces html content in block of
    <!-- commentIdentifier -->Old content<!-- end of commentIdentifier -->
    by new value.

    :param html: source html containing section(s) to be replaced
    :param commentIdentifier: identifier of section to be replaced
    :param newContent: new content of identified section
    :return: resulting html

    >>> html = "<html><body><h1>Title</h1><p><!-- content -->Here should be page content<!-- end of content --></p></body></html>"
    >>> html = replaceHTMLBlock(html, "content", "My content of page.")
    >>> print html
    <html><body><h1>Title</h1><p>My content of page.</p></body></html>
    """
    commentIdentifier = commentIdentifier.strip()
    # Markers are compared on upper-cased text, i.e. case-insensitively.
    startId = ("<!-- %s -->" % commentIdentifier).upper()
    endId = ("<!-- END OF %s -->" % commentIdentifier).upper()
    # Replace every occurrence; each iteration rewrites one section.
    # NOTE(review): if newContent itself contains the start marker this
    # loop never terminates — confirm callers never pass such content.
    while html.upper().find(startId) >= 0:
        upperCase = html.upper()
        startPos = upperCase.find(startId)
        endPos = upperCase.find(endId)
        if endPos < 0:
            # Unbalanced markers: log and return the html unchanged so far.
            logging.error("replaceHTMLBlock endPos(%d) < 0" % (endPos))
            return html
        # Cut up to and including the closing "-->" of the end marker.
        endCutPos = upperCase.find("-->", endPos) + 3
        if endCutPos < 3:
            return html
        if startPos >= 0 and endCutPos >= 0:
            html = html[:startPos] + newContent + html[endCutPos:]
    return html
133e31bad4ef6d743ff5a078394a4ed6b87df862
692,842
def load_urls_from_text(text):
    """Load urls from text, one per line; ignores '#' comment lines and
    duplicates.

    :param text: newline-separated url list
    :return: set of unique, non-empty urls
    """
    urls = set()
    for line in text.split('\n'):
        # Strip surrounding whitespace so indentation does not matter.
        url = line.strip()
        # Bug fix: the comment test previously ran on the raw line, so a
        # comment with leading whitespace was not ignored.  (str.strip can
        # never return None, so the old `is not None` check was dropped.)
        if url and not url.startswith("#"):
            urls.add(url)
    return urls
eecebeb8c298f6a5d07c1e4233d29032b247f756
692,847
def save(df, corpus):
    """Saves dataset with predicted senses to CSV file.

    Args:
        df: Dataframe with mutisense words and their contexts.
        corpus: Name of the original file.

    Returns:
        Path to saved CSV file with predicted senses.
    """
    output_fpath = corpus + "_predictions.csv"
    csv_options = dict(sep="\t", encoding="utf-8", index=False)
    df.to_csv(output_fpath, **csv_options)
    print("Generated dataset: {}".format(output_fpath))
    return output_fpath
c195d43654b3c789022f063e33ae12ee5092c984
692,848
def get_subject_from_components(components):
    """Return the certificate subject from components list.

    >>> components = [('C', 'FR'), ('ST', 'Ile-de-France'), ('L', 'Paris'),
    ...               ('O', 'Test Ltd'), ('OU', 'Test'), ('CN', 'Alain Dupont'),
    ...               ('emailAddress', 'alain.dupont@localhost')]
    >>> print get_subject_from_components(components)
    /C=FR/ST=Ile-de-France/L=Paris/O=Test Ltd/OU=Test/CN=Alain \
Dupont/emailAddress=alain.dupont@localhost
    """
    parts = [u'%s=%s' % (key, val) for key, val in components]
    return u'/' + u'/'.join(parts)
4dea7e19759986fd8d3d7756e32df06e5476a173
692,850
def bytes_view(request):
    """
    A simple test view that returns a fixed ASCII-bytes response body.
    """
    body = b'<Response><Message>Hi!</Message></Response>'
    return body
19bac61604ba81a0f87640670f2993a56aee4d3f
692,855
import re


def strip_ansi_sequences(text: str) -> str:
    """Strip ANSI sequences from the input text.

    :param text: text to sanitize
    :return: sanitized text
    """
    pattern = re.compile(
        r"(?:\x1B[@-Z\\-_]|"
        r"[\x80-\x9A\x9C-\x9F]|"
        r"(?:\x1B\[|\x9B)[0-?]*[ -/]*[@-~])"
    )
    return pattern.sub("", text)
67c0c7c950f2ed52704e3302a0067856f2a3116e
692,856
def parse_relation(fields):
    """
    Assumes all relations are binary; argument names are discarded.

    :param fields: correspond to one Brat line separated by tab
    :return: relation id, relation name, arg1 and arg2
    """
    rel_id = fields[0]
    rel, arg1, arg2 = fields[1].split(" ")
    return rel_id, rel, arg1.split(":")[1], arg2.split(":")[1]
9e86f45d571e7b3de2e64645209a5854f145330e
692,858
def get_bind_args(run):
    """
    Returns args available to template expansion for `run`.

    Instance args may override the built-in run_id/job_id entries.
    """
    bind_args = {"run_id": run.run_id, "job_id": run.inst.job_id}
    bind_args.update(run.inst.args)
    return bind_args
9b454d408f732ea4c3b36ef486732a3e61721d52
692,860
def check_data(data):
    """
    Check the *data* argument and make sure it's a tuple.

    If the data is a single array, return it as a tuple with a single
    element. This is the default format accepted and used by all gridders
    and processing functions.

    Examples
    --------
    >>> check_data([1, 2, 3])
    ([1, 2, 3],)
    >>> check_data(([1, 2], [3, 4]))
    ([1, 2], [3, 4])
    """
    return data if isinstance(data, tuple) else (data,)
23298eb4070eb0643b9dd75c51ac4f93ee525f0b
692,861
def decode(code):
    """
    Convert seat code into binary and then into int.

    :param code: str containing position info (F/L map to 0, B/R to 1)
    :return: int ID corresponding to position code
    """
    to_bits = str.maketrans("FLBR", "0011")
    return int(code.translate(to_bits), 2)
4ac5258983908382abaa075c5e9c7e0ce1d98903
692,865
import random


def randomMultinomial(dist):
    """
    @param dist: List of positive numbers summing to 1 representing a
    multinomial distribution over integers from 0 to C{len(dist)-1}.
    @returns: random draw from that distribution
    """
    remaining = random.random()
    for index, weight in enumerate(dist):
        remaining -= weight
        if remaining < 0.0:
            return index
    # Floating point round-off can leave remaining >= 0 after the loop.
    return "weird"
57c7b2aaa74cecc298f5bfb54f9d95d3589c1741
692,868
import re


def sanitize_element(element):
    """
    Eliminate some unneeded characters out of the XML snippet if they appear.

    :param element: element str
    :return: sanitized element str
    """
    # A newline plus any following whitespace run is removed in one pass
    # (equivalent to removing "\n\s+" and then any bare "\n").
    return re.sub(r"\n\s*", "", element)
63c87e8972127bb4f85600c911f98464254cca68
692,869
def default_partition(key, nr_partitions, params):
    """Returns ``hash(str(key)) % nr_partitions``."""
    bucket = hash(str(key))
    return bucket % nr_partitions
d49eea66c36779c6a17417e96551938a1fa6ee89
692,874
def get_port(socket):
    """Return the port to which a socket is bound."""
    _, port = socket.getsockname()
    return port
7618a44a28aa209922b257751e0b862917b1ea9c
692,875
def average_gate_error_to_rb_decay(gate_error: float, dimension: int):
    """
    Inversion of eq. 5 of [RB]_ arxiv paper.

    :param gate_error: The average gate error.
    :param dimension: Dimension of the Hilbert space, 2^num_qubits
    :return: The RB decay corresponding to the gate_error
    """
    inv_dim = 1 / dimension
    return (gate_error - 1 + inv_dim) / (inv_dim - 1)
91f58a420e08a9bc43a871db2a51c06ee4ad7756
692,876
import logging


def image_sanity_fail(image, shape, description):
    """
    Sanity check on images: training and testing; shape needs to match.
    description affects the logging, on failure.
    """
    if image is None:
        logging.error("{} : image is None".format(description))
        return True
    if image.shape != shape:
        logging.error("{} : shape is {}, (expecting {})".format(
            description, repr(image.shape), repr(shape)))
        return True
    return False
a7c795495e1f4766630d07599964fc9e08620816
692,878
def get_active_profile(content, key):
    """
    Gets the active profile for the given key in the content's config
    object, or the string 'None' when unset or on any config error.
    """
    try:
        if content.config.has_option(key, 'profile'):
            return content.config.get(key, 'profile')
        return 'None'
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; config errors still fall back to 'None'.
        return 'None'
b26491381fcd22003316ce9d1c2eb0577d89d715
692,879
def _find_provider(hass, prov_type): """Return provider for type.""" for provider in hass.auth.auth_providers: if provider.type == prov_type: return provider return None
e8cfa630f961f330785064c9b8e0d51d2231ef35
692,885
def filter_dict(data, *keys):
    """
    Returns a smaller dict with just `keys`; entries whose value is missing
    or falsy are dropped.
    """
    return {key: data.get(key) for key in keys if data.get(key)}
79d8ad9b79981b0d36230c2a787266f20ac0ef00
692,886
def unpack_list(a_list):
    """
    Recursively flatten a list of (arbitrarily nested) lists/tuples into a
    single flat list.

    Arguments:
        a_list -> A list of an arbitrary number of sublists

    Returns:
        A list containing all elements of all sublists.
    """
    flat = []
    for item in a_list:
        # Exact list/tuple types recurse; anything else is a leaf element.
        if type(item) in (list, tuple):
            flat.extend(unpack_list(item))
        else:
            flat.append(item)
    return flat
fd75ff640fba451cdf27c4efc36f6a4a8441e544
692,889
def update_ports(cfmclient, port_uuids, field, value):
    """
    Update attributes of composable fabric switch ports.

    :param cfmclient: CFM API client exposing ``patch``
    :param port_uuids: list of str representing Composable Fabric port UUIDs
    :param field: str specific field which is desired to be modified (case-sensitive)
    :param value: str new desired value for the field
    :return: dict which contains count, result, and time of the update
        (``None`` when ``port_uuids`` is empty)
    :rtype: dict
    """
    if not port_uuids:
        return None
    patch_op = {'path': '/{}'.format(field), 'value': value, 'op': 'replace'}
    payload = [{'uuids': port_uuids, 'patch': [patch_op]}]
    return cfmclient.patch('v1/ports', payload)
0088d5a569be878d53de662943c6c86db4cd8d76
692,891
def zipper_merge(*lists):
    """
    Combines lists by alternating elements from them (zipper merge).

    Combining lists [1,2,3], ['a','b','c'] and [42,666,99] results in
    [1,'a',42,2,'b',666,3,'c',99].  The lists should have equal length or
    they are assumed to have the length of the shortest list.
    """
    return [item for group in zip(*lists) for item in group]
9e892a201684f5d215fd38c46c72bed0e457ba40
692,892
import copy


def create_sample_tallying_counters(samples_list):
    """
    Creates a tallying dictionary of samples for reporting the final results.

    :param samples_list: List of samples
    :return: Dictionary of samples and empty tally scores
    """
    counter_names = ('Biallelic_Testable', 'Sig_ASE', 'Sig_ASE_Ref',
                     'Sig_ASE_Alt', 'Biallelic_No_ASE', 'Passing_Homozygous',
                     'Passing_Homozygous_Ref', 'Passing_Homozygous_Alt',
                     'Non_Testable')
    # Each sample gets its own independent dict of zeroed counters.
    return {sample: {name: 0 for name in counter_names}
            for sample in samples_list}
8791a893f20230370a6495ad29c6fc2d10e33e1e
692,894
from typing import Callable
from typing import Any
from functools import reduce


def pipe(*operators: Callable[[Any], Any]) -> Callable[[Any], Any]:
    """Compose multiple operators left to right.

    A composition of zero operators gives back the source.

    Examples:
        >>> pipe()(source) == source
        >>> pipe(f)(source) == f(source)
        >>> pipe(f, g)(source) == g(f(source))
        >>> pipe(f, g, h)(source) == h(g(f(source)))

    Returns:
        The composed callable.
    """
    def compose(source: Any) -> Any:
        result = source
        for op in operators:
            result = op(result)
        return result

    return compose
b663782ccce3002ce8f21e42a5c47b205649c157
692,895
from typing import List
from datetime import datetime


def get_closest_timestamp(timestamps: List[datetime], ref_timestamp: datetime) -> datetime:
    """
    Get the timestamp closest to the reference timestamp.

    :param timestamps: non-empty list of candidate timestamps
    :param ref_timestamp: reference point
    :return: the element of ``timestamps`` with the smallest absolute
        distance to ``ref_timestamp`` (first one wins on ties)
    """
    # Bug fix: comparing `.days` of the difference truncated sub-day
    # distances and skewed negative deltas (timedelta(hours=-1).days == -1),
    # so e.g. a timestamp one hour before the reference could lose to one
    # two hours after it.  Compare the full timedelta magnitudes instead.
    return min(timestamps, key=lambda ts: abs(ts - ref_timestamp))
514d1713321c2c2a0a22d45ff20c45eb83c24a6a
692,896
def _xml_escape_attr(attr, skip_single_quote=True): """Escape the given string for use in an HTML/XML tag attribute. By default this doesn't bother with escaping `'` to `&#39;`, presuming that the tag attribute is surrounded by double quotes. """ escaped = (attr .replace('&', '&amp;') .replace('"', '&quot;') .replace('<', '&lt;') .replace('>', '&gt;')) if not skip_single_quote: escaped = escaped.replace("'", "&#39;") return escaped
bc2e28a480ba41b13708665b55eb822e207bb236
692,897
def encode_id(instance_id):
    """
    Convert an instance id to mask colour.

    This matches the encoding done in the dataset renderer, see
    https://github.com/jskinn/Dataset_Synthesizer/blob/local-devel/Source/Plugins/NVSceneCapturer/Source/NVSceneCapturer/Private/NVSceneCapturerUtils.cpp#L673

    :param instance_id: integer instance id
    :return: three-element colour channel list
    """
    channels = [instance_id << 1, instance_id >> 6, instance_id >> 13]
    return [channel & 254 for channel in channels]
156862e36400c934a58ca1ad6dc3c24fdf9f7c65
692,898
def build_config_var(beta=False, external=False):
    """
    Create the configuration key which will be used to locate the base
    tiddlywiki file.
    """
    suffixes = []
    if external:
        suffixes.append('_external')
    if beta:
        suffixes.append('_beta')
    return 'base_tiddlywiki' + ''.join(suffixes)
bb3adb422ef26740702acd24c517b095921a4e83
692,899
def get_pair_st_so(st_id, so_id):
    """
    Get string st<st_id>-so<so_id> (e.g. st0-so0).

    Parameters
    ----------
    st_id : int
        station id.
    so_id : int
        source id.

    Returns
    -------
    pair_st_so : str
        output.
    """
    return "st{}-so{}".format(st_id, so_id)
0f222818163706f40dc835ee2b66c7420834ea7a
692,905
def read_file(file_name):
    """
    Read lines from a file and return them, stripped, as a list.

    :param file_name: path to file
    :type file_name: str
    """
    with open(file_name, 'r') as handle:
        return [line.strip() for line in handle]
9b9c2fe87be5e2a63ded1cc9bc0be15499a1b90f
692,909
def params_1(kernels, time_1, time_system, time_format, sclk_id):
    """Input parameters from WGC API example 1."""
    params = {}
    params['kernels'] = kernels
    params['times'] = time_1
    params['time_system'] = time_system
    params['time_format'] = time_format
    params['sclk_id'] = sclk_id
    return params
10a5c31b2fbd2575093dc51dd0cf49b5f67f7172
692,910
import yaml


def load_yaml(file_path):
    """Load the yaml file located at ``file_path``.

    Any error raised while opening or parsing the file propagates.
    """
    with open(file_path) as fin:
        return yaml.load(fin, Loader=yaml.FullLoader)
33baca8cb28a935d6a8d0dd643cd7cf716c191ac
692,911
import hashlib


def computeFileChecksum(algo, filePath):
    """Compute digest of ``filePath`` using ``algo``.

    Supported hashing algorithms are SHA256, SHA512, and MD5.  The file is
    read in chunks of 8192 bytes.

    :raises ValueError: if algo is unknown.
    :raises IOError: if filePath does not exist.
    """
    if algo not in ('SHA256', 'SHA512', 'MD5'):
        raise ValueError("unsupported hashing algorithm %s" % algo)
    digest = hashlib.new(algo)
    with open(filePath, 'rb') as content:
        # iter() with a b'' sentinel stops cleanly at end of file.
        for chunk in iter(lambda: content.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
5e93b79ec6f008133e2ce436c91be9452d912c63
692,914
def currency_to_num(string, data_type=int):
    """
    Converts a pound sterling currency value into a number.

    >>> currency_to_num("Β£250,000")
    >>> 250000

    :param string: value of currency as a string
    :param data_type: intended data type of output
    :return: numerical value of currency (the cleaned string when it
        cannot be converted)
    """
    cleaned = string.strip()
    for junk in ('Β£', ',', 'pcm'):
        cleaned = cleaned.replace(junk, '')
    try:
        return data_type(cleaned)
    except ValueError:
        return cleaned
884f964695100758220387f67ab296d5182cf503
692,917
def quotePath(path):
    """Appends quotes around string if quotes are not already present"""
    quote = r'"'
    if path[0] != quote:
        path = quote + path
    if path[-1] != quote:
        path += quote
    return path
fdd0a2e31a9515dcf41983e52d54b065c09e76df
692,918
def r_upper(l: list) -> list:
    """Recursively calls str.upper on each string in l."""
    if not l:
        return []
    head, *tail = l
    return [head.upper()] + r_upper(tail)
efbafb59201be5c504756920d82b541d1f32702a
692,919
import csv
from io import StringIO


def csv_res2_dict_lst(res):
    """Convert CSV string with a header into list of dictionaries"""
    reader = csv.DictReader(StringIO(res), delimiter=",")
    return [row for row in reader]
5271cd4ef1e82fdc77b0d69c58faedf2f971c07c
692,921
def check_zones(domain, zones):
    """
    Check if the provided domain exists within the zones; return the first
    matching zone (exact match or parent zone) or None.
    """
    for candidate in zones:
        exact = domain == candidate
        subdomain = domain.endswith("." + candidate)
        if exact or subdomain:
            return candidate
    return None
b00a8db6175f13c2227ce5b51fe0954a04252fea
692,922
import torch


def interpolate(x: torch.Tensor, ratio: int):
    """Interpolate data in time domain.

    This is used to compensate the resolution reduction in downsampling of
    a CNN: every time step is repeated ``ratio`` times consecutively.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, ratio to interpolate

    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    return x.repeat_interleave(ratio, dim=1)
507afdbbf1a3b35f00ea0721a4343d07df4c1ec5
692,926
def base_arguments(source="/tmp", target="/tmp", start=None, end=None, glob="*"):
    """Builds the base arguments dictionary

    Args:
        source (str): the source directory of the packets
        target (str): the target directory for the output
        start (str): the time for the earliest packets
        end (str): the time for the latest packets
        glob (str): file-glob for the source files

    Returns:
        dict: arguments for the GetPackets Constructor
    """
    return {
        "source": source,
        "target": target,
        "start": start,
        "end": end,
        "source_glob": glob,
    }
f58add887447f2e262a7bb9a65d617b11e9a7265
692,927
def collate_acceptance_ratios(acceptance_list):
    """
    Collate the running proportion of all runs that have been accepted from
    an MCMC chain.
    """
    accepted = 0
    ratios = []
    for total, flag in enumerate(acceptance_list, start=1):
        if flag:
            accepted += 1
        ratios.append(accepted / total)
    return ratios
24138b866db311e6757389311ab308336b4eabec
692,928
def turn_cycle(cycle, front_node):
    """
    Turn the cycle so that ``front_node`` is the first (and last) element.

    :param cycle: the cycle to be turned; as usual for cycle
        representations, the last element repeats the first one
    :param front_node: the node that needs to be at the front (and the back)
    :return: the turned cycle
    :raises Exception: if front_node is not part of the cycle

    Runs in O(V) with a single index lookup instead of the original's
    repeated re-slicing (O(V^2) total); it also never mutates the caller's
    sequence, which the original did when no rotation was needed.
    """
    if front_node not in cycle:
        # ensure it will not run forever because it lacks the required node
        raise Exception("incorrect use of turn_cycle function, front_node not in given cycle")
    # Drop the leading elements up to front_node — this matches the
    # original loop, which discarded (not rotated) the front until it
    # matched; the discarded nodes reappear via the duplicate at the end.
    rotated = cycle[cycle.index(front_node):]
    rotated += (rotated[0],)
    return rotated
58ff487f5cea72bbfbb9076df97c432af42eb482
692,929
def get_individual(individual, ensembl_json):
    """Return a list with the genotypes whose sample name contains the
    individual's name."""
    return [genotype for genotype in ensembl_json["genotypes"]
            if individual in genotype["sample"]]
a3024c8f7ec15b37ceb7d83874a662354a14ee57
692,931
def find_period(l):
    """
    Finds the period of list of numbers.

    Parameters
    ----------
    l: integer[]
        The sequence of numbers.

    Return
    ------
    steps: integer
        The period. Returns None, if no period is found.
    """
    for candidate in range(1, len(l)):
        # A period is confirmed when the first block repeats immediately.
        if l[candidate] == l[0] and l[:candidate] == l[candidate:candidate + candidate]:
            return candidate
    return None
7de707c550aeeaa37ad7ad7b1d1819b2809059fe
692,936
def partition_names_by_comp(names, compmap=None):
    """Take an iterator of names and return a dict with component names
    keyed to lists of variable names.  Simple names (having no '.' in them)
    will have a key of None.

    For example, the list ['abc.def', 'abc.pdq', 'foo', 'bar'] would return
    the dict { 'abc': ['def','pdq'], None: ['foo', 'bar'] }

    If a compmap dict is passed in, it will be populated with data from the
    iterator of names.
    """
    result = {} if compmap is None else compmap
    for name in names:
        comp, sep, rest = name.partition('.')
        if sep:
            result.setdefault(comp, []).append(rest)
        else:
            result.setdefault(None, []).append(name)
    return result
7dc9c90feef9fdaf3ac78e5a04a7568265d70b30
692,937
import re


def parse_report(text):
    """Split a numbered report ("1. ...; 2. ...") into single cases."""
    pieces = re.split(r'\d\.\s', text)
    return [piece.rstrip().rstrip(';') for piece in pieces if piece]
e16e37fb7c04731199e5ee6c3ce43e75ac0e8a47
692,938
from typing import List


def normalize_identity(un_normalized: List[str], verbose: bool = False) -> List[str]:
    """
    Identity normalizer: hands the input back unchanged.

    Args:
        un_normalized: input string
        verbose: unused; kept for interface compatibility with other normalizers
    Returns input string
    """
    # Deliberately a no-op — the very same object is returned.
    return un_normalized
a20268b30bc5f7ea21e4e90763d52c5c25733e85
692,940
def getLatLong(bbox):
    """ Get the tuple of minimum and maximum latitudes and longitudes.

    :param bbox: `geographic_msgs/BoundingBox`_.
    :returns: (min_lat, min_lon, max_lat, max_lon)
    """
    lower, upper = bbox.min_pt, bbox.max_pt
    return (lower.latitude, lower.longitude, upper.latitude, upper.longitude)
82990d3897c94ab0f8e7e0f3b49369d689ee22bc
692,946
import re


def pattern_match(item: str, pattern: str, strict: bool = True) -> bool:
    """
    Check if item matches with the pattern that contains "*" wildcards and
    "?" question marks.

    Args:
        item: The string that pattern will be applied to.
        pattern: A wildcard (glob) pattern; "*" matches one or more
            characters and "?" matches exactly one character.
        strict: If `True`, then it will check if matched string equals with
            the `item` parameter. So applying "foo?" pattern on "foobar"
            will result in `False`. Default is `True`.

    Returns:
        A boolean value.
    """
    # Escape *every* regex metacharacter first (the previous code escaped
    # only '.' and '+', so a pattern containing e.g. '(' or '[' raised
    # re.error or matched incorrectly), then re-introduce the two wildcard
    # tokens with the original semantics ('*' -> '.+', '?' -> '.').
    regex = re.escape(pattern).replace(r'\*', '.+').replace(r'\?', '.')
    match = re.match(regex, item)
    if strict and match:
        return match.group(0) == item
    return bool(match)
fb92c1782f684e6a6fbad4890a299e5670a9487e
692,947
def process(result, labels, tensor_name, threshold, top_k):
    """Processes inference result and returns labels sorted by confidence."""
    # MobileNet based classification model returns one result vector.
    assert len(result.tensors) == 1
    tensor = result.tensors[tensor_name]
    probs, shape = tensor.data, tensor.shape
    assert shape.depth == len(labels)
    # Keep only (index, prob) pairs above the threshold, best first,
    # at most top_k entries.
    candidates = [(i, p) for i, p in enumerate(probs) if p > threshold]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [' %s (%.2f)' % (labels[i], p) for i, p in candidates[:top_k]]
add2992d9f3321b02c6602f6744b206743525d23
692,949
def find_between(in_str, start='>', end='<'):
    """ Return the substring located between the first occurrence of
    ``start`` and the next occurrence of ``end`` after it. """
    after_start = in_str.split(start)[1]
    return after_start.split(end)[0]
22e52d8865a81ac33b0d498252b1d09c415d68ec
692,953
def _parse_logline_timestamp(t): """Parses a logline timestamp into a tuple. Args: t: Timestamp in logline format. Returns: An iterable of date and time elements in the order of month, day, hour, minute, second, microsecond. """ date, time = t.split(' ') month, day = date.split('-') h, m, s = time.split(':') s, ms = s.split('.') return (month, day, h, m, s, ms)
9b0ea2f6cfe4edef89eec6dbbddbdd258640c210
692,957
def valid_location(currBoard, location):
    """
    Takes in the current board and a potential location and checks if
    placing something in that square is a valid move or not.

    A move is valid when the index lies in 0..8 and that square is still
    blank (" ").  Returns True if the move's valid and False otherwise.
    """
    return 0 <= location <= 8 and currBoard[location] == " "
76a8fbc7b22de9324f68787feac163ce7bcdff50
692,959
def _isnumeric(var) : """ Test if var is numeric, only integers are allowed """ return type(var) is int
51c29247a5d4531f565534afe7750a13138c110e
692,960
def get_description_eng(cve):
    """
    Attempts to extract an english description from the provided cve.
    If none is found, returns a blank string.

    Parameters
    ----------
    cve : dict
        The dictionary generated from the CVE json.

    Returns
    -------
    str
        This will be either the lower-cased english description, or a
        blank string.
    """
    if "description" in cve and "description_data" in cve["description"]:
        for entry in cve["description"]["description_data"]:
            # Return the first entry that is both english and has a value;
            # english entries without a value are skipped, not terminal.
            if "lang" in entry and entry["lang"] == "eng" and "value" in entry:
                return entry["value"].lower()
    return ""
3e7d5e6f61fd752225fd0f95178bf2aea4bbbf5c
692,961
import base64 import json def _read_pubsub_json(event): """Extracts the json payload from a pub/sub message. Args: event: A Pub/Sub event. Returns: The json_payload from a pub/sub message. """ pubsub_message = base64.b64decode(event['data']).decode('utf-8') return json.loads(pubsub_message)
9bfafe8f36e6bcd0db68f9d4528081c44067b04f
692,962
def _conformal_score_interval(predictions, values): """ Compute the non-conformity score of a set of values under some baseline predictor Args: predictions: array [batch_shape, 2], a batch of interval predictions values: array [n_evaluations, batch_shape], note that for values batch_shape is the last dimension while for predictions batch_shape is the first dimension Returns: score: array [n_evaluations, batch_shape], where score[i, j] is the non-conformity score of values[i, j] under the prediction[j] """ score = (values - predictions.min(dim=1, keepdims=True)[0].permute(1, 0)) / (predictions[:, 1:2] - predictions[:, 0:1]).abs().permute(1, 0) - 0.5 return score
5ae6c5ac653a5cfe0c40d5af4434df00cba84d32
692,967
def calcHedgeRatio(betaHat: float, sigmaHat: float) -> float:
    """Calculates the hedge ratio.

    Parameters
    ----------
    betaHat : float
        Beta hat of two assets.
    sigmaHat : float
        Sigma hat of two assets.

    Returns
    -------
    float
        Returns the hedge ratio, ``betaHat * (1 + 0.5 * sigmaHat)``.
    """
    adjustment = 1 + 0.5 * sigmaHat
    return betaHat * adjustment
aa694502b5364631f126598e7d86f3ef80607afa
692,971
def categorize_columns(cols, msuffix='_mean', esuffix='_error'):
    """Categorize the column names of a mean dataframe.

    Args:
        cols (list): a list of column names
        msuffix (str): suffix marking mean columns
        esuffix (str): suffix marking error columns
    Return:
        (list, list, list): (excol, mcol, ecol)
        excol are columns of exact values with no errorbar (possibly labels)
        mcols are mean columns
        ecols are error columns
    Examples:
        >>> rcol, mcol, ecol = categorize_columns(mdf.columns)
    """
    # Membership tests are kept independent (not elif), matching the
    # original behaviour for overlapping suffixes.
    mean_cols = [name for name in cols if name.endswith(msuffix)]
    err_cols = [name for name in cols if name.endswith(esuffix)]
    exact_cols = [name for name in cols
                  if not (name.endswith(msuffix) or name.endswith(esuffix))]
    return exact_cols, mean_cols, err_cols
3c8752d30d0258ff67d15d0e75c2c2247c5d11fd
692,973
def home_view(request):
    """Home view: render with an empty template context."""
    return dict()
df385b4641f7ab07f477289d2a7bbf04378072f6
692,982
import re


def clean_markdown(message_content: str) -> str:
    """Return the string with markdown removed by deleting every character
    that is not a letter, digit, or whitespace (ordinary punctuation is
    dropped too)."""
    non_text = re.compile(r"[^a-zA-Z0-9\s]")
    return non_text.sub("", message_content)
731a044968c9501987d8d7d7e7164fd5cd1a253a
692,986
def group_by_compatibility(thermodynamic_states):
    """Utility function to split the thermodynamic states by compatibility.

    Parameters
    ----------
    thermodynamic_states : list of ThermodynamicState
        The thermodynamic state to group by compatibility.

    Returns
    -------
    compatible_groups : list of list of ThermodynamicState
        The states grouped by compatibility.
    original_indices: list of list of int
        The indices of the ThermodynamicStates in the original list.
    """
    compatible_groups = []
    original_indices = []
    for state_idx, state in enumerate(thermodynamic_states):
        matched = False
        # Deliberately no break: a state is appended to *every* group whose
        # representative it is compatible with, as before.
        for group, indices in zip(compatible_groups, original_indices):
            if state.is_state_compatible(group[0]):
                matched = True
                group.append(state)
                indices.append(state_idx)
        if not matched:
            # No compatible group found — start a new one.
            compatible_groups.append([state])
            original_indices.append([state_idx])
    return compatible_groups, original_indices
8cc8f5f7e39f4354b014805fd687ea4ba92bcc81
692,987
def _parse_node_to_coords(element): """ Parse coordinates from a node in the overpass response. The coords are only used to create LineStrings and Polygons. Parameters ---------- element : dict element type "node" from overpass response JSON Returns ------- coords : dict dict of latitude/longitude coordinates """ # return the coordinate of a single node element coords = {"lat": element["lat"], "lon": element["lon"]} return coords
6ce67abb5b294ea8458ecdee64d2b49736348372
692,988
import json


def encode_project_info(long_name, description):
    """Encode a Sumatra project as JSON, omitting empty fields."""
    data = {
        key: value
        for key, value in (("name", long_name), ("description", description))
        if value
    }
    return json.dumps(data)
65ab651a812741986edc3ac8c0a4188c930420ff
692,994
def assign_asset_type_to_province_roads(x):
    """Assign asset types to roads assets in Vietnam

    The types are assigned based on our understanding of:
    1. The reported asset code in the data

    Parameters
        x - Pandas DataFrame with numeric asset code

    Returns
        asset type - Which is either of (Bridge, Dam, Culvert, Tunnel, Spillway, Road)
    """
    # Known special-structure codes; everything else is a plain road.
    code_to_type = {
        12: 'Bridge',
        25: 'Bridge',
        23: 'Dam',
        24: 'Culvert',
        26: 'Tunnel',
        27: 'Spillway',
    }
    return code_to_type.get(x.code, 'Road')
b759f0e3295dddc2348e1888cae157333c2b2d32
692,995
def parse_csv_results(csv_obj, upper_limit_data):
    """ Parses the raw CSV data

    Converts the csv_obj into an array of valid values for averages and
    confidence intervals based on the described upper_limits.

    Args:
      csv_obj: An array of rows (dict) describing the CSV results
      upper_limit_data: A dictionary containing the upper limits of each story

    Returns:
      A dictionary which has the stories as keys and an array of
      confidence intervals and valid averages as data.
    """
    values_per_story = {}
    for row in csv_obj:
        # For now only frame_times is used for testing representatives'
        # performance.
        if row['name'] != 'frame_times':
            continue
        story_name = row['stories']
        if story_name not in upper_limit_data:
            continue
        if story_name not in values_per_story:
            values_per_story[story_name] = {
                'averages': [],
                'ci_095': []
            }
        # CSV fields arrive as strings, so a zero sample count is '0', not 0.
        # The previous check (row['count'] == 0) could never be true for CSV
        # input; accept both spellings to also cover pre-parsed integer rows.
        if row['avg'] == '' or row['count'] in (0, '0'):
            continue
        values_per_story[story_name]['ci_095'].append(float(row['ci_095']))
        values_per_story[story_name]['averages'].append(float(row['avg']))
    return values_per_story
fb92c6be25abae94f615c0dedb5723eec8e49f62
692,996
import torch


def BCELoss_labels_weighted(P: torch.Tensor, Y: torch.Tensor, W: torch.Tensor) \
        -> torch.Tensor:
    """
    Binary cross entropy loss which allows for different weights for
    different labels.

    Parameters
    ----------
    P : torch.Tensor
        The predicted labels (probabilities).
    Y : torch.Tensor
        The true labels.
    W : torch.Tensor
        The weights per label.

    Returns
    -------
    loss : torch.Tensor
        Tensor object of size (1,1) containing the loss value.
    """
    # Clamp away from 0 and 1 so the logarithms stay finite.
    probs = torch.clamp(P, min=1e-7, max=1 - 1e-7)
    per_element = W * (-Y * torch.log(probs) - (1 - Y) * torch.log(1 - probs))
    return torch.mean(per_element)
03d0088f8276f2cd106b6f628e6661eb115aa360
693,003
def apply_format(var, format_str):
    """Recursively format every non-iterable inside ``var`` with ``format_str``.

    Nested lists/tuples are rendered with brackets/parentheses, so the
    result is always a single string, e.g.
    ``apply_format([2, [1, 4], 4, 1], '{:.1f}')`` returns
    ``'[2.0, [1.0, 4.0], 4.0, 1.0]'``.
    """
    if not isinstance(var, (list, tuple)):
        return format_str.format(var)
    inner = ', '.join(apply_format(item, format_str) for item in var)
    if isinstance(var, tuple):
        return '({})'.format(inner)
    return '[{}]'.format(inner)
ad011be4a5998c9a338f54c9c3550da00375273c
693,004
def currency_clean_helper(currency, value):
    """Used to validate that a currency value works for a given currency.

    Should be called from a forms clean() method.

    Returns (value, errors): on success, the amount collapsed into a single
    integer in minor units and None; on failure, None and an error message.
    """
    whole = value[0]
    frac = str(value[1]) if len(value) == 2 else None

    if frac and len(frac) > currency.decimal_places:
        return None, "Too many decimal places (%s) for currency %s" % (
            len(frac), currency)

    # Normalize the fractional part to exactly `decimal_places` digits.
    if not frac:
        frac = '0' * currency.decimal_places
    else:
        frac = frac.ljust(currency.decimal_places, '0')

    return int(str(whole) + frac), None
db51c6969316264065bb973a1871021775f40f6c
693,005
import logging
import requests
import json


def add_intersight_org(AUTH, RES_MOID, CLAIM_CONFIG):
    """ Add Intersight Organization

    Builds the organization payload for the partner, POSTs it to the
    Intersight Organizations endpoint, and returns the Moid of the
    newly created organization.
    """
    payload = {
        "Name": CLAIM_CONFIG['partner_id'],
        "Description": "Org for " + CLAIM_CONFIG['partner_id'],
        "ResourceGroups": [
            {
                "ObjectType": "resource.Group",
                "Moid": RES_MOID
            }
        ]
    }
    logging.info(payload)

    url = CLAIM_CONFIG['intersight_base_url'] + 'organization/Organizations'
    response = requests.post(url, data=json.dumps(payload), auth=AUTH)
    logging.info(response.text)

    org_moid = response.json()["Moid"]
    logging.info("ORGANIZATION: " + org_moid)
    return org_moid
b8727f6f4db32bfed5f162cd0ac97fe4b3eb2d8d
693,006
def read_sample_rate(data_dir: str):
    """Read the sample rate from the raw_data.csv file.

    The first line of the file is a header row; the sample rate is the
    second comma-separated field on the second line.
    """
    with open(f"{data_dir}/raw_data.csv") as csvfile:
        next(csvfile)  # skip the header row
        rate_row = next(csvfile)
        return float(rate_row.strip().split(",")[1])
bdc1a8e32ee33f6cd556ca6680400c803499b14b
693,008
def is_builder_newer(old_component, new_component):
    """
    Return True if the given builder has been modified with respect to its
    state when the given component_meta was created.

    :param old_component: a dict of metadata describing a component ring
    :param new_component: a dict of metadata describing a component ring
    :return: True if the builder has been modified, False otherwise.
    :raises ValueError: if the version of the new_component is older than
        the version of the existing component.
    """
    old_version = old_component['version']
    new_version = new_component['version']
    if new_version < old_version:
        raise ValueError('Older builder version: %s < %s' %
                         (new_version, old_version))
    return old_version < new_version
7491b7162e33fccc0171ee8a991cb283f6bca817
693,009