content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def last(inlist):
    """Return the final element of a list or tuple; any other value passes through.

    Examples
    --------
    >>> last([1, 0])
    0
    >>> last("/path/somewhere")
    '/path/somewhere'
    """
    if not isinstance(inlist, (list, tuple)):
        return inlist
    return inlist[-1]
from typing import Any
import pickle
def get_papers_pickle(filename: str) -> dict[str, Any]:
    """Retrieve papers (dict format) from a file in folder data/papers.

    :param filename: basename of the pickle file inside ``data/papers``
    :return: the unpickled dict of papers
    """
    # Bug fix: the filename argument was previously ignored and a literal
    # placeholder path was opened instead of the requested file.
    with open(f"data/papers/{filename}", 'rb') as papers_file:
        return pickle.load(papers_file)
import requests
from bs4 import BeautifulSoup
def get_article(url):
    """Download the article at *url* and parse it.

    Input : URL of the article
    Output : parsed content as a BeautifulSoup object (lxml parser)
    """
    response = requests.get(url)
    return BeautifulSoup(response.content, 'lxml')
def _convert_from_european_format(string):
    """Convert a number string from European format to standard format.

    In the European format commas are decimal points and full stops group
    thousands: e.g. 1,200.5 is written as 1.200,5.

    :param str string: a representation of the value as a string
    :returns: the string converted to standard format
    :rtype: str
    """
    # Drop thousands separators, then turn the decimal comma into a point.
    return string.replace(".", "").replace(",", ".")
import torch
def box_iou(boxes1, boxes2):
    """Compute pairwise IoU between box sets of shape (N, 4) and (M, 4).

    Boxes are (x1, y1, x2, y2); returns an (N, M) tensor of IoU values.
    """
    def area(b):
        return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    area1 = area(boxes1)
    area2 = area(boxes2)
    # Pairwise intersection corners via broadcasting: [N, M, 2]
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    extent = (bottom_right - top_left).clamp(min=0)
    intersection = extent[:, :, 0] * extent[:, :, 1]  # [N, M]
    union = area1[:, None] + area2 - intersection
    return intersection / union
import requests
def fetch_json(uri):
    """Perform an HTTP GET on *uri* and return the body parsed as JSON.

    Args:
        uri: the string URI to fetch.
    Returns:
        The decoded JSON response.
    Raises:
        requests.HTTPError: if the fetch failed (via raise_for_status).
    """
    response = requests.get(uri)
    response.raise_for_status()  # surface HTTP-level failures as exceptions
    return response.json()
def my_method2(o):
    """Return *o* repeated/multiplied five times.

    Parameters
    ----------
    o : int
        Note that a string also works: sequences are repeated, numbers scaled.

    Returns
    -------
    Five times 'o'
    """
    return o * 5
import requests
def authenticate_with_refresh_token(client_id, redirect_uri, refresh_token):
    """Exchange an OAuth2 refresh token for a new TD Ameritrade access token.

    :param client_id: OAuth client id
    :param redirect_uri: registered redirect URI
    :param refresh_token: previously issued refresh token
    :return: the access token string, or '' when the exchange fails
    """
    url = 'https://api.tdameritrade.com/v1/oauth2/token'
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    # Let requests form-encode the fields.  The previous string concatenation
    # did not URL-encode the values, which breaks on tokens containing
    # reserved characters such as '+' or '/'.
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': refresh_token,
        'client_id': client_id,
        'redirect_uri': redirect_uri,
    }
    try:
        response = requests.post(url, headers=headers, data=payload)
        data = response.json()
        return data['access_token']
    except (requests.RequestException, ValueError, KeyError):
        # Network failure, non-JSON body, or missing token field:
        # treat the refresh token as invalid (was a bare `except`).
        print('Invalid refresh token.')
        return ''
def root_center(X, root_idx=0):
    """Shift joint coordinates so the root joint sits at the origin.

    Useful for hip-centering a skeleton.

    :param X: position of joints (NxMx2), (NxMx3), or a single skeleton (Mx2/Mx3)
    :type X: numpy.ndarray
    :param root_idx: root/hip joint index, defaults to 0
    :type root_idx: int, optional
    :return: root-centred joint positions, same shape as the input
    :rtype: numpy.ndarray
    """
    assert len(X.shape) in (2, 3)
    # Keep the sliced axis (root_idx:root_idx+1) so broadcasting subtracts
    # the root coordinates from every joint.
    if len(X.shape) == 3:
        return X - X[:, root_idx:root_idx + 1, :]
    return X - X[root_idx:root_idx + 1, :]
def getOperands(inst):
    """Return the input operands of an instruction.

    Input:
        - inst: the instruction as a list of fields
    Output:
        - tuple with the two source operands
    """
    # Opcode "00100" carries its sources in fields 3 and 4;
    # every other opcode keeps them in fields 2 and 3.
    if inst[0] == "00100":
        return inst[3], inst[4]
    return inst[2], inst[3]
def checkfile(findstring, fname):
    """Return True if *findstring* occurs in the file *fname*.

    :param findstring: substring to search for
    :param fname: path of the file to scan
    """
    # Use a context manager so the handle is closed promptly; the previous
    # version leaked the file object returned by open().
    with open(fname) as fh:
        return findstring in fh.read()
def crop_region(image, region):
    """Crop one rectangular region out of an image.

    Args:
        image (image): a numpy image
        region (dict): bounds with keys x1, y1, x2, y2
    Returns:
        image: the cropped image (a numpy view, not a copy)
    """
    rows = slice(region["y1"], region["y2"])
    cols = slice(region["x1"], region["x2"])
    return image[rows, cols]
from typing import Sequence
import random
def sample_coins(coin_biases: Sequence[float]) -> int:
    """Return an integer whose i-th bit is 1 with probability coin_biases[i]."""
    total = 0
    for bit_pos, bias in enumerate(coin_biases):
        # random() is uniform on [0, 1), so `< bias` is True with P = bias.
        total |= int(random.random() < bias) << bit_pos
    return total
import inspect
def get_wrapped_source(f):
    """Return the source text of *f*, unwrapping any decorator chain.

    :param f: input function (possibly wrapped via functools.wraps)
    :return: source of the innermost wrapped function
    """
    # Walk down the __wrapped__ chain before asking inspect for the source.
    while hasattr(f, "__wrapped__"):
        f = f.__wrapped__
    return inspect.getsource(f)
def pad_sentences(sentences, padding_word="<PAD/>"):
    """Pad every sentence so that all sentences share the maximum length.

    :param sentences: list of token lists
    :param padding_word: filler token appended to short sentences
    :return: padded_sentences, a list of equal-length token lists
    """
    longest = max(len(s) for s in sentences)
    return [s + [padding_word] * (longest - len(s)) for s in sentences]
def get_util2d_shape_for_layer(model, layer=0):
    """Return the (nrow, ncol) Util2d array shape for one layer of a model.

    Works for both structured and unstructured models.

    Parameters
    ----------
    model : model object
        model for which the Util2d shape is sought.
    layer : int
        layer (base 0) for which the Util2d shape is sought.

    Returns
    ---------
    (nrow, ncol) : tuple of ints
        util2d shape for the given layer
    """
    nrow, ncol, _, _ = model.get_nrow_ncol_nlay_nper()
    if nrow is None:
        # Unstructured grid: a single "row" with a layer-specific node count.
        return (1, ncol[layer])
    return (nrow, ncol)
from typing import Any
def is_of_type_by_str(value: Any, type_str: str):
    """Check whether the type name of ``value`` equals ``type_str``.

    Parameters
    ----------
    value: any
        a value
    type_str: str
        the expected type of ``value``, given as a str

    Examples
    --------
    >>> is_of_type_by_str(2, 'int')
    True
    >>> is_of_type_by_str("2.5", 'float')
    False

    Returns
    -------
    boolean
    """
    return type(value).__name__ == type_str
def default(base, deft):
    """Return *deft* when *base* is unset (falsy); otherwise return *base*.

    0.0 is treated as a real value, not as "unset".
    """
    return base if base == 0.0 else (base or deft)
def points_to_svgd(p, close=True):
    """Convert a list of (x, y) point pairs into an SVG path string.

    :param p: sequence of (x, y) tuples
    :param close: append 'z' to close the path (default True)
    :return: SVG path data string
    """
    commands = ['M%.4f,%.4f' % p[0]]
    commands.extend('L%.4f,%.4f' % pt for pt in p[1:])
    if close:
        commands.append('z')
    return ''.join(commands)
def __nnc_values_generator_to_list(self, generator):
    """Flatten a NNC values generator (chunks exposing ``.values``) into one list."""
    return [value for chunk in generator for value in chunk.values]
import re
def camel_to_snake_case(in_str):
    """Convert a camelCase/PascalCase string to snake_case.

    :param in_str: camel case formatted string
    :return: snake case formatted string
    """
    # Insert an underscore before every uppercase letter except at the very
    # start of the string, then lowercase.  (The previous split-based version
    # produced a spurious leading underscore for PascalCase input.)
    return re.sub(r'(?<!^)(?=[A-Z])', '_', in_str).lower()
def get_collection_fmt_desc(colltype):
    """Return the appropriate description for a collection type.

    E.g. tsv items are tab separated, csv comma separated, etc.
    Unknown codes yield an empty string.
    """
    descriptions = {
        'csv': 'Multiple items can be separated with a comma',
        'ssv': 'Multiple items can be separated with a space',
        'tsv': 'Multiple items can be separated with a tab',
        'pipes': 'Multiple items can be separated with a pipe (|)',
        'multi': '',
    }
    return descriptions.get(colltype, '')
from typing import Callable
def interpolator(source_low: float, source_high: float,
                 dest_low: float = 0, dest_high: float = 1,
                 lock_range=False) -> Callable[[float], float]:
    """General linear interpolation function factory.

    :param source_low: low end of the input range
    :param source_high: high end of the input range
    :param dest_low: output for an input equal to source_low
    :param dest_high: output for an input equal to source_high
    :param lock_range: when True, outputs are strictly clamped to the dest
        range; when False, inputs outside the source range extrapolate
        linearly beyond it.
    :return: a function of source -> dest applying the interpolation
    :raises ValueError: if source_low == source_high
    """
    if source_low > source_high:
        # Normalise so source_low < source_high, swapping dest ends to match.
        return interpolator(source_high, source_low, dest_high, dest_low, lock_range)
    if source_low == source_high:
        raise ValueError(f'unable to create interpolator, source_low == source_high == {source_low}')
    inv_span = 1 / (source_high - source_low)

    def blend(fraction: float) -> float:
        # Linear blend between dest_low (fraction 0) and dest_high (fraction 1).
        return fraction * dest_high + (1.0 - fraction) * dest_low

    if lock_range:
        return lambda value: blend(max(0.0, min(1.0, (value - source_low) * inv_span)))
    return lambda value: blend((value - source_low) * inv_span)
import struct
def of_slicer(remaining_data):
    """Slice raw bytes into complete OpenFlow packets.

    :param remaining_data: buffer of received bytes
    :return: (list of complete packets, leftover bytes)
    """
    packets = []
    # An OpenFlow header is at least 4 bytes; bytes 2-3 hold the total length.
    while len(remaining_data) > 3:
        (total_len,) = struct.unpack('!H', remaining_data[2:4])
        if len(remaining_data) < total_len:
            break  # incomplete packet: wait for more data
        packets.append(remaining_data[:total_len])
        remaining_data = remaining_data[total_len:]
    return packets, remaining_data
def ne(value, other):
    """Return True when *value* differs from *other* (``!=``)."""
    result = value != other
    return result
def type_match(haystack, needle):
    """Return True if *needle* is an element-wise prefix of *haystack*."""
    if len(needle) > len(haystack):
        return False
    return all(h == n for h, n in zip(haystack, needle))
def get_range(xf, yf, low, high):
    """Return magnitudes of the spectrum within a frequency range.

    Arguments:
        xf: A list of frequencies, generated by rfftfreq
        yf: The amplitudes of frequencies, generated by rfft
        low: Lower bound of frequency to capture
        high: Upper bound of frequency to capture
    returns: list of absolute amplitudes for frequencies in [low, high).
    Raises ValueError when either bound exceeds every frequency in xf.
    """
    start = next((i for i, freq in enumerate(xf) if freq >= low), -1)
    if start == -1:
        raise ValueError
    stop = next((i for i in range(start, len(xf)) if xf[i] >= high), -1)
    if stop == -1:
        raise ValueError
    return [abs(v) for v in yf[start:stop]]
import pickle
def pickle_load(path):
    """Un-pickle and return the object stored in the file at *path*.

    Parameters
    ----------
    path : str
        path of the pickle file to read

    Returns
    -------
    dict
        data that was stored in the input pickle file
    """
    with open(path, 'rb') as handle:
        return pickle.load(handle)
from typing import Any
import json
def is_json_serializable(obj: Any) -> bool:
    """Check if the object is json serializable.

    Args:
        obj (Any): object
    Returns:
        bool: True for a json serializable object
    """
    try:
        json.dumps(obj, ensure_ascii=False)
        return True
    except (TypeError, ValueError, RecursionError):
        # TypeError: unsupported type; ValueError: circular reference;
        # RecursionError: pathologically deep nesting.  Narrowed from a
        # bare `except`, which also hid unrelated bugs.
        return False
def shortest_path(g, start, goal):
    """BFS over an adjacency-list graph (dict of sets), MIT-style.

    :param g: graph as a dict mapping vertex -> set of neighbours
    :param start: start vertex
    :param goal: end vertex
    :return: level[goal] - 1, i.e. one less than the BFS edge distance
        from start to goal (NOTE(review): the -1 looks caller-specific --
        confirm the intended convention).  Raises KeyError if goal is
        unreachable from start.
    """
    level = {start: 0}      # shortest BFS distance from start per vertex
    parent = {start: None}  # back-pointers for path reconstruction
    frontier = [start]
    depth = 1
    while frontier:
        upcoming = []
        for node in frontier:
            for neighbour in g[node]:
                if neighbour not in level:
                    level[neighbour] = depth
                    parent[neighbour] = node
                    upcoming.append(neighbour)
        frontier = upcoming
        depth += 1
    return level[goal] - 1
import re
def fix_links(chunk, tag2file):
    """Retarget HTML and markdown anchor links so they point across documents.

    Finds hyperlinks of the form ``<a href="#sec1">`` or ``[sec1](#sec1)``
    and prepends the owning file's basename to the fragment, producing e.g.
    ``<a href="02_jupyterbook.html#sec1">``.

    :param str chunk: text string
    :param dict tag2file: maps a tag to a file basename, e.g.
        tag2file['sec1'] = '02_jupyterbook'
    :return: chunk with fixed links
    :rtype: str
    """
    pattern_tag = r'[\w _\-:]'
    # HTML anchors (href may be wrapped in escaped quotes), then markdown links.
    html_pattern = r'<' + pattern_tag + '+ href=[\\\]{0,2}["\']#(' + pattern_tag + '+)[\\\]{0,2}["\'][^>]*>'
    md_pattern = r'\[' + pattern_tag + '+\]\(#(' + pattern_tag + '+)\)'
    fixed_chunk = chunk
    for pattern in (html_pattern, md_pattern):
        for hit in re.finditer(pattern, chunk):
            whole, tag = hit.group(), hit.group(1)
            # Unknown tags fall back to the tag itself via dict.get.
            retargeted = whole.replace('#' + tag, tag2file.get(tag, tag) + '.html#' + tag)
            fixed_chunk = fixed_chunk.replace(whole, retargeted)
    return fixed_chunk
def is_numeric(value):
    """Test if a value is numeric (an int or a float)."""
    return isinstance(value, (int, float))
def value_of(column):
    """Return the first value of a mapping.

    value_of({'S': 'Space Invaders'}) -> 'Space Invaders'
    """
    values_iterator = iter(column.values())
    return next(values_iterator)
import six
import json
import zlib
import base64
def send_request(req_url, req_json=None, compress=False, post=True, api_key=None):
    """Send a POST/GET request to req_url with req_json, default to POST.

    :param req_url: endpoint URL
    :param req_json: dict payload serialised as JSON (POST only)
    :param compress: if True, the response payload is base64 + zlib compressed
    :param post: use POST when True, GET otherwise
    :param api_key: value for the x-api-key header
    Returns:
        The payload returned by the request, formatted as a dict.
    """
    if req_json is None:
        # Avoid a mutable default argument, which is shared between calls.
        req_json = {}
    headers = {
        'x-api-key': api_key,
        'Content-Type': 'application/json'
    }
    # Send the request and verify the request succeeded
    if post:
        req = six.moves.urllib.request.Request(req_url, data=json.dumps(req_json).encode('utf-8'), headers=headers)
    else:
        req = six.moves.urllib.request.Request(req_url, headers=headers)
    try:
        res = six.moves.urllib.request.urlopen(req)
    except six.moves.urllib.error.HTTPError as e:
        raise ValueError(
            'Response error: An HTTP {} code was returned by the mixer. Printing '
            'response\n\n{}'.format(e.code, e.read()))
    res_json = json.loads(res.read())
    if 'payload' not in res_json:
        # NOTE: urlopen responses have no .text attribute, so the previous
        # formatting (res.text) raised AttributeError; report the parsed JSON.
        raise ValueError('Response error: Payload not found. Printing response\n\n''{}'.format(res_json))
    # If the payload is compressed, decompress and decode it
    payload = res_json['payload']
    if compress:
        payload = zlib.decompress(base64.b64decode(payload), zlib.MAX_WBITS | 32)
    return json.loads(payload)
import re
def isEndStoryText(string):
    """Return True if *string* marks the end of stories (starts with ***)."""
    return re.search(r'^\*\*\*', string) is not None
def depth(obj):
    """Calculate the nesting depth of a list object *obj*.

    Return 0 if obj is a non-list, or 1 + the maximum depth of its
    elements (a possibly nested list of objects).

    Assume: obj has finite nesting depth
    @param int|list[int|list[...]] obj: possibly nested list of objects
    @rtype: int

    >>> depth(3)
    0
    >>> depth([])
    1
    >>> depth([1, 2, 3])
    1
    >>> depth([1, [2, 3], 4])
    2
    >>> depth([[], [[]]])
    3
    """
    if not isinstance(obj, list):
        return 0
    # default=0 handles the empty list: depth([]) == 1.
    return 1 + max((depth(item) for item in obj), default=0)
def example(self):
    """Get and cache an example batch of `inputs, labels` for plotting."""
    cached = getattr(self, '_example', None)
    if cached is not None:
        return cached
    # Not cached yet: pull one batch from the `.train` dataset and keep it.
    cached = next(iter(self.train))
    self._example = cached
    return cached
from typing import List
def _get_name_matches(name: str, guess_words: List[str]) -> int:
    """Count how many of *guess_words* occur (as substrings) in *name*.

    :param name: string searched for matches
    :param guess_words: candidate words
    :return: number of matches
    """
    return sum(1 for word in guess_words if word in name)
def repo_url_to_full_name(url):
    """Convert a repository absolute URL to GitHub's ``full_name`` form.

    Parameters
    ----------
    url : str
        URL of the repository.

    Returns
    -------
    str
        Full name (owner/repo) of the repository per the GitHub API.
    """
    # Drop the scheme and host: the first three '/'-separated segments.
    segments = url.split("/")
    return "/".join(segments[3:])
def calculate_variance(n, p):
    """Sample variance of a binomial proportion: p(1 - p) / n."""
    q = 1 - p
    return p * q / n
def _arguments_str_from_dictionary(options):
    """Render a dict of method options as a method-argument string.

    String values are single-quoted; everything else uses str().
    """
    parts = []
    for key, value in options.items():
        if isinstance(value, str):
            parts.append(key + "='" + str(value) + "'")
        else:
            parts.append(key + "=" + str(value))
    return ",".join(parts)
def transpose(matrix):
    """Return the transpose of the input matrix.

    Args:
        matrix: 2D list
    Return:
        result: 2D list
    """
    num_rows = len(matrix)
    num_cols = len(matrix[0])
    return [[matrix[r][c] for r in range(num_rows)] for c in range(num_cols)]
from typing import Optional
from typing import Dict
from typing import Any
import jinja2
def merge_template(template_filename: str, config: Optional[Dict[str, Any]]) -> str:
    """Load a Jinja2 template from file and merge configuration.

    :param template_filename: path of the template file
    :param config: values rendered into the template; when falsy, the raw
        file content is returned unrendered.
    :return: the (possibly rendered) template text
    """
    with open(template_filename) as handle:
        raw_content = handle.read()
    if not config:
        return raw_content
    # StrictUndefined makes missing variables raise instead of rendering blank.
    template = jinja2.Template(raw_content, undefined=jinja2.StrictUndefined)
    return template.render(**config)
def anonymise_max_length_pk(instance, field):
    """Return the instance pk as a string, truncated to the field's max_length.

    Handles fields whose max_length makes them shorter than the pk (i.e. UUID).
    """
    pk_text = str(instance.pk)
    limit = getattr(field, "max_length", None)
    if limit and len(pk_text) > limit:
        return pk_text[:limit]
    return pk_text
import torch
from typing import List
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
    """Decode location predictions back into boxes, undoing the train-time
    offset-regression encoding.

    Args:
        loc: location predictions for the loc layers.  NOTE(review): the code
            reads columns 0:14, so the shape is presumably [num_priors, 14]
            (box offsets plus five 2-d point offsets) rather than the
            [num_priors, 4] the original docstring claimed -- confirm with
            the matching encoder.
        priors: Prior boxes in center-offset form. Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes.
    Return:
        Tensor containing decoded bounding box (and extra point) predictions.
    """
    # Decode each offset group against the priors:
    #   cols 0:2  -> box centre (cx, cy), scaled by variance[0] and prior size
    #   cols 2:4  -> box size (w, h), via exp() scaled by variance[1]
    #   cols 4:14 -> five additional 2-d points decoded like centres
    #   (looks like a 5-landmark face-detector decode -- TODO confirm)
    boxes = torch.cat((
        priors[:, 0:2] + loc[:, 0:2] * variances[0] * priors[:, 2:4],
        priors[:, 2:4] * torch.exp(loc[:, 2:4] * variances[1]),
        priors[:, 0:2] + loc[:, 4:6] * variances[0] * priors[:, 2:4],
        priors[:, 0:2] + loc[:, 6:8] * variances[0] * priors[:, 2:4],
        priors[:, 0:2] + loc[:, 8:10] * variances[0] * priors[:, 2:4],
        priors[:, 0:2] + loc[:, 10:12] * variances[0] * priors[:, 2:4],
        priors[:, 0:2] + loc[:, 12:14] * variances[0] * priors[:, 2:4]), 1)
    # Convert centre/size to corner form: (cx, cy, w, h) -> (x1, y1, x2, y2),
    # keeping the remaining point columns unchanged.
    tmp = boxes[:, 0:2] - boxes[:, 2:4] / 2
    return torch.cat((tmp, boxes[:, 2:4] + tmp, boxes[:, 4:]), dim=-1) | afed28303a045edc6c9f6fc30da4efd051ea0858 | 686,303 |
def wrap_argument(text):
    """Quote a command argument and escape it when it contains special characters.

    Plain tokens pass through unchanged; anything containing whitespace,
    quotes or backslashes is backslash-escaped and wrapped in double quotes.
    """
    special = (' ', '"', "'", '\\')
    if any(ch in text for ch in special):
        escaped = text.replace('\\', r'\\').replace('"', r'\"')
        return '"%s"' % (escaped,)
    return text
def filter_dict(pred, d):
    """Return the subset of dict *d* whose keys satisfy pred(key)."""
    return {key: d[key] for key in d if pred(key)}
import json
def df_to_dict(df, orient='None'):
    """Convert a DataFrame to a dict via its JSON representation.

    Replacement for pandas' to_dict, which upcasts ints to floats when the
    frame mixes ints with other floats
    (https://github.com/pandas-dev/pandas/issues/12859#issuecomment-208319535,
    see also https://stackoverflow.com/questions/37897527).

    :param df: a pandas DataFrame
    :param orient: format of the intermediate JSON string and resulting dict
    :return: dict
    """
    serialized = df.to_json(orient=orient)
    return json.loads(serialized)
def create_data_model(distancematrix):
    """Create the OR-Tools data model for a single-vehicle routing problem.

    Parameters:
        distancematrix (float[][]): array of distances between addresses
    Returns:
        dictionary: data model with 'distance_matrix', 'num_vehicles' (1)
        and 'depot' (0)
    """
    return {
        'distance_matrix': distancematrix,
        'num_vehicles': 1,
        'depot': 0,
    }
def read_scores_into_list(scores_file):
    """Read scores from a tab-separated file; scores are in the 2nd column.

    :param scores_file: path of the file to read
    :return: list of float scores
    :raises AssertionError: if no scores were read
    """
    scores_list = []
    with open(scores_file) as fh:
        for line in fh:
            cols = line.strip().split("\t")
            scores_list.append(float(cols[1]))
    # (Removed the dead `f.closed` expression statement from the original;
    # the `with` block already guarantees the file is closed.)
    assert scores_list, "no scores read in (scores_list empty)"
    return scores_list
def fields(cursor):
    """Map each column name of an executed DB-API 2.0 cursor to its index (0 and up)."""
    return {desc[0]: index for index, desc in enumerate(cursor.description)}
def introspection_email(request):
    """Return the email the introspection endpoint should report (request.param), or None."""
    return getattr(request, 'param', None)
def submat(M, i, j):
    """Return a copy of matrix *M* without row *i* and column *j* (a minor)."""
    minor = M.copy()
    minor.row_del(i)
    minor.col_del(j)
    return minor
import json
def parse_data_file(data_path):
    """Parse a JSON data file containing benchmark run results."""
    with open(data_path, "r") as fp:
        return json.loads(fp.read())
import re
def splitTypeName(name):
    """Split the vendor suffix from a name: splitTypeName('FooTypeEXT') => ('FooType', 'EXT').

    A suffix is a trailing run of two or more uppercase letters; names
    without one return an empty suffix.
    """
    match = re.search(r'[A-Z][A-Z]+$', name)
    if not match:
        return (name, '')
    suffix = match.group()
    return (name[:-len(suffix)], suffix)
from typing import Dict
def stream_error(e: BaseException, line: str) -> Dict:
    """Build the error `_jc_meta` wrapper for a failed streaming-parse line."""
    meta = {
        'success': False,
        'error': f'{e.__class__.__name__}: {e}',
        'line': line.strip(),
    }
    return {'_jc_meta': meta}
def import_object(name):
    """Return the class (or attribute) named by a dotted import path."""
    module_path, attribute = name.rsplit('.', 1)
    module = __import__(module_path, fromlist=[attribute])
    return getattr(module, attribute)
def is_numeric(value):
    """Duck-typed numeric check: the value supports both subtraction and multiplication
    (covers float, int, and numpy numeric types)."""
    required = ("__sub__", "__mul__")
    return all(hasattr(value, attr) for attr in required)
def has_extension(filepath, extensions):
    """Check whether *filepath* ends with any of the given extensions."""
    # str.endswith accepts a tuple of candidates directly.
    return filepath.endswith(tuple(extensions))
def to_basestring(s):
    """Convert a string argument to a byte string.

    Bytes pass through unchanged; a unicode string is encoded as utf8.
    """
    return s if isinstance(s, bytes) else s.encode('utf-8')
def cross(str_a, str_b):
    """Cross product (all pairwise concatenations) of two strings A and B."""
    pairs = []
    for a in str_a:
        for b in str_b:
            pairs.append(a + b)
    return pairs
def getKeyNamePath(kms_client, project_id, location, key_ring, key_name):
    """Build the fully-qualified KMS crypto-key resource path.

    Args:
        kms_client: client instantiation
        project_id: str - GCP project id
        location: str - key location
        key_ring: str - key-ring name
        key_name: str - crypto-key name
    Returns:
        key_name: str - 'projects/YOUR_GCLOUD_PROJECT/locations/YOUR_LOCATION
        /keyRings/YOUR_KEY_RING/cryptoKeys/YOUR_CRYPTO_KEY'
    """
    return kms_client.crypto_key_path_path(
        project_id=project_id,
        location=location,
        key_ring=key_ring,
        crypto_key_path=key_name,
    )
def num_pos(y_test):
    """Count the positive labels in the test set.

    :param y_test: Labels of test set
    :type y_test: `np.array` or list
    :return: number of positive labels
    :rtype: `int`
    """
    # len()-based emptiness check works for both lists and numpy arrays;
    # comparing an ndarray against [] does not behave like a list comparison.
    if len(y_test) == 0:
        return 0
    return sum(y_test)
def location(C, s, k):
    """Compute the location at proportion *k* along one segment of a polyline.

    Parameters
    ----------
    C : [(x,y),...] list of tuples
        The coordinates of the polyline.
    s : int
        The index of a segment on polyline C. Must be within [0, n-2].
    k : float
        The proportion from the start pt to the end pt of the segment.

    Returns
    ----------
    (x,y) : (float,float)
        The computed location.
    """
    (x0, y0), (x1, y1) = C[s], C[s + 1]
    return (x0 + k * (x1 - x0), y0 + k * (y1 - y0))
import re
def matchLettersAndNumbersOnly(value):
    """Return True for strings made only of ASCII letters and digits."""
    return re.match('^[a-zA-Z0-9]+$', value) is not None
import math
def haversine_distance(origin, destination):
    """Haversine great-circle distance in km between two (lat, lon) points."""
    earth_radius_km = 6371.0  # FAA approved globe radius in km
    lat1, lon1 = origin
    lat2, lon2 = destination
    dlat = math.radians(lat2 - lat1)
    dlon = math.radians(lon2 - lon1)
    half_chord = (math.sin(dlat / 2.) * math.sin(dlat / 2.)
                  + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
                  * math.sin(dlon / 2.) * math.sin(dlon / 2.))
    angle = 2.0 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    return earth_radius_km * angle
from pathlib import Path
from typing import Union
import hashlib
import re
def insert_hash(path: Path, content: Union[str, bytes], *, hash_length=7, hash_algorithm=hashlib.md5):
    """Return *path* with a content-derived hash inserted after the first dot.

    hash_length 7 matches git commit short references.

    :param path: original path
    :param content: data to hash; str is encoded as UTF-8
    :param hash_length: number of hex digits to keep
    :param hash_algorithm: hashlib constructor to use
    :return: new Path with the hash embedded in the file name
    """
    payload = content.encode() if isinstance(content, str) else content
    digest = hash_algorithm(payload).hexdigest()[:hash_length]
    if '.' in path.name:
        renamed = re.sub(r'\.', f'.{digest}.', path.name, count=1)
    else:
        renamed = f'{path.name}.{digest}'
    return path.with_name(renamed)
def build_dataservices_by_publisher_query() -> str:
    """Build a SPARQL query counting dataservices grouped by publisher.

    NOTE(review): the doubled braces suggest this text may once have been a
    format/f-string template; as a plain string they are emitted literally --
    confirm the consumer expects them.
    """
    # Counts distinct dcat:DataService instances per publisher identifier
    # in the fellesdatakatalog dataservices graph.
    return """
    PREFIX dct: <http://purl.org/dc/terms/>
    PREFIX dcat: <http://www.w3.org/ns/dcat#>
    SELECT ?organizationNumber (COUNT(DISTINCT ?service) AS ?count)
    FROM <https://dataservices.fellesdatakatalog.digdir.no>
    WHERE {{
        ?service a dcat:DataService .
        ?service dct:publisher ?publisher .
        ?publisher dct:identifier ?organizationNumber .
    }}
    GROUP BY ?organizationNumber""" | 59922406f0b2d73ad2e552039b12acc60a36893b | 686,357 |
async def get_action(game_state, message: str, possible_actions: list) -> int:
    """Ask the active player for an action valid in the current game state.

    :param game_state: GameState object with all game data inside
    :param message: string with message for player
    :param possible_actions: list with all possible actions for this state
    :return: int value of the chosen action
    """
    shelter = game_state.active_player
    # A single legal action needs no prompt at all.
    if len(possible_actions) == 1:
        return possible_actions[0]
    while True:
        action = await shelter.input_async(message)
        if action in possible_actions:
            return action
        shelter.print(f'No such action as {action}!')
def human_file_size(bytes_):
    """Format a byte count as a 'human-readable' size (13 KB, 4.1 MB, 102 bytes...).

    Non-numeric input yields '0 bytes'.  Negative values are formatted by
    magnitude (NOTE(review): the sign is never re-applied -- this matches
    the original behaviour).
    """
    try:
        bytes_ = float(bytes_)
    except (TypeError, ValueError, UnicodeDecodeError):
        return '0 bytes'
    if bytes_ < 0:
        bytes_ = -bytes_
    KB, MB, GB, TB, PB = (1 << 10, 1 << 20, 1 << 30, 1 << 40, 1 << 50)
    if bytes_ < KB:
        return "{} bytes".format(bytes_)
    # Pick the first unit whose next threshold the value does not reach.
    for threshold, divisor, unit in ((MB, KB, 'KB'), (GB, MB, 'MB'),
                                     (TB, GB, 'GB'), (PB, TB, 'TB')):
        if bytes_ < threshold:
            return "{} {}".format(round(bytes_ / divisor, 2), unit)
    return "{} PB".format(round(bytes_ / PB, 2))
def square(file_index, rank_index):
    """Return the 0-63 square number for a (file, rank) index pair on an 8x8 board."""
    return file_index + 8 * rank_index
from typing import Tuple
def file_name_to_parts(image_file_name) -> Tuple[str, str, int]:
    """Split an iMerit annotation `file_name` into its components.

    Expects the form ``<dataset>.seq<id>.frame<num>.<ext>`` and returns the
    dataset name, sequence id, and frame number.
    """
    dataset_name, seq_part, frame_part = image_file_name.split('.')[:3]
    seq_id = seq_part.split('seq')[1]
    frame_num = int(frame_part.split('frame')[1])
    return dataset_name, seq_id, frame_num
import collections
def trainval_log_statistic(trainval_log):
    """Summarise a parsed train/val log into last/max top-k accuracy stats.

    Args:
        trainval_log (dict): output of function: read_trainval_log_file;
            must contain 'epoch' (1-based epoch numbers) and 'val_accuracy'
            (one list of top-k accuracies per epoch).
    Returns:
        dict with 'total_epoch', 'last_top-K_val_accuracy',
        'max_top-K_val_accuracy' and 'max_top-K_epoch' entries.
    """
    stats = {'total_epoch': len(trainval_log['epoch'])}
    num_topk = len(trainval_log['val_accuracy'][0])
    # Last-epoch accuracy for every k.
    for k in range(num_topk):
        stats[f'last_top-{k + 1}_val_accuracy'] = trainval_log['val_accuracy'][-1][k]
    # Per-k accuracy series across epochs (epochs are 1-based indices).
    per_k = collections.OrderedDict(
        (str(k + 1), [trainval_log['val_accuracy'][epoch - 1][k]
                      for epoch in trainval_log['epoch']])
        for k in range(num_topk))
    for key, series in per_k.items():
        best = max(series)
        stats[f'max_top-{key}_val_accuracy'] = best
        stats[f'max_top-{key}_epoch'] = trainval_log['epoch'][series.index(best)]
    return stats
import hashlib
def sha256_encode(text):
    """Return the raw SHA-256 digest of *text*.

    Accepts str (UTF-8 encoded), bytes (hashed as-is), or any other object
    (stringified, then UTF-8 encoded).  Falsy values such as None hash the
    empty string — this matches calls with an empty payload (e.g. GET
    requests), avoiding AttributeError on None.
    """
    if isinstance(text, str):
        return hashlib.sha256(text.encode('utf-8')).digest()
    if isinstance(text, bytes):
        return hashlib.sha256(text).digest()
    if not text:
        # Empty/absent payload: hash the empty string instead of failing.
        return hashlib.sha256(b'').digest()
    return hashlib.sha256(str(text).encode('utf-8')).digest()
def is_current_or_ancestor(page, current_page):
    """Tell whether *page* is the current page or one of its ancestors.

    Delegates the decision to current_page.is_current_or_ancestor().
    """
    result = current_page.is_current_or_ancestor(page)
    return result
def buildVecWithFunction(da, func, extra_args=()):
    """Build a global PETSc vector by evaluating *func* at every mesh point.

    Parameters
    ==========
    da : petsc.DMDA
        The structured mesh.
    func : callable
        Fills values in place; called as func(coords_slab, out_slab, *extra_args).
    extra_args : tuple
        Additional positional arguments forwarded to *func*.

    Returns
    =======
    petsc.Vec
        Global vector holding the function values at each point.
    """
    result = da.createGlobalVec()
    values = da.getVecArray(result)
    coords = da.getVecArray(da.getCoordinates())
    ranges = da.getRanges()
    if da.getDim() == 2:
        (xs, xe), (ys, ye) = ranges
        func(coords[xs:xe, ys:ye], values[xs:xe, ys:ye], *extra_args)
    else:
        # 3D case: one extra slab dimension.
        (xs, xe), (ys, ye), (zs, ze) = ranges
        func(coords[xs:xe, ys:ye, zs:ze], values[xs:xe, ys:ye, zs:ze], *extra_args)
    return result
def _clean_sambam_id(inputname: str) -> str:
"""Sometimes there are additional characters in the fast5 names added
on by albacore or MinKnow. They have variable length, so this
attempts to clean the name to match what is stored by the fast5 files.
There are 5 fields. The first has a variable length.
[7x or 8x az09]-[4x az09]-[4x az09]-[4x az09]-[12x az09]
0688dd3-160d-4e2c-8af8-71c66c8db127
7e33249c-144c-44e2-af45-ed977f6972d8
67cbf79c-e341-4d5d-97b7-f3d6c91d9a85
"""
# just grab the first five things when splitting with dashes
splitname = inputname.split("-")[0:5]
# The last one might have extra characters, unknown. We're relying
# on the 5th field to consistently have 12 characters to extract
# the correct id
splitname[4] = splitname[4][0:12]
return "-".join(splitname) | f8c641a42144fcabcfb422f406e5c1c0ab2316b1 | 686,378 |
import json
def validate_invoking_event(event):
    """Validate that an AWS Config event carries every field this rule needs.

    Parses event['invokingEvent'] (a JSON string) and checks, in order, for
    the presence of: invokingEvent, resultToken, configurationItem, then
    resourceType, configuration, configuration.userName, resourceId and
    configurationItemCaptureTime.

    Returns the parsed invokingEvent dict.
    Raises Exception naming the first missing field.
    """
    if 'invokingEvent' not in event:
        raise Exception('Error, invokingEvent not found in event, aborting.')
    invoking_event = json.loads(event['invokingEvent'])
    if 'resultToken' not in event:
        raise Exception('Error, resultToken not found in event, aborting.')
    if 'configurationItem' not in invoking_event:
        raise Exception("Error, configurationItem not found in event['invokingEvent'], aborting.")
    config_item = invoking_event['configurationItem']
    # Fields required directly on the configuration item (first pair checked
    # before userName, preserving the original error ordering).
    for field in ('resourceType', 'configuration'):
        if field not in config_item:
            raise Exception(f"Error, {field} not found in event['invokingEvent']['configurationItem'], aborting.")
    if 'userName' not in config_item['configuration']:
        raise Exception("Error, userName not found in event['invokingEvent']['configurationItem']['configuration'], aborting.")
    for field in ('resourceId', 'configurationItemCaptureTime'):
        if field not in config_item:
            raise Exception(f"Error, {field} not found in event['invokingEvent']['configurationItem'], aborting.")
    return invoking_event
import math
def get_oversized(length):
    """
    Return the smallest power of two that is >= *length*.

    The oddeven network requires a power-of-2 length; if *length* already
    is a power of two it is returned unchanged.

    Raises ValueError for lengths < 1 (mirroring math.log2's domain error
    in the previous implementation).
    """
    if length < 1:
        raise ValueError("length must be a positive integer")
    # (length - 1).bit_length() is exact for arbitrarily large ints, unlike
    # 2 ** ceil(log2(length)): the float log can round down and yield a
    # power of two *smaller* than length (e.g. length = 2**48 + 1).
    return 2 ** (length - 1).bit_length()
def is_set(obj) -> bool:
    """Return True when *obj* is a set or a frozenset."""
    return isinstance(obj, set) or isinstance(obj, frozenset)
def _csv_dict_row(user, mode, **kwargs):
"""
Convenience method to create dicts to pass to csv_import
"""
csv_dict_row = dict(kwargs)
csv_dict_row['user'] = user
csv_dict_row['mode'] = mode
return csv_dict_row | b6d47f9df34b8889233ea6037f1a19c5570c392c | 686,386 |
def divide(a, b):
    """Divide a by b, print the outcome, and return a summary string.

    Returns "a / b = quotient" (quotient rounded to 4 decimals) on success,
    or "DIV ERROR" when b is zero.
    """
    if b == 0:
        # Division by zero: report instead of raising.
        print("You cannot divide by zero.")
        return "DIV ERROR"
    quotient = round(a / b, 4)
    print(f"The quotient of {a} and {b} is {quotient}.")
    return f"{a} / {b} = {quotient}"
import math
def Euclidean_distance(x, y):
    """
    Return the Euclidean distance between two vectors.

    Components are paired with zip, so any trailing entries of the longer
    vector are ignored.
    """
    squared = 0
    for a, b in zip(x, y):
        squared += (a - b) ** 2
    return math.sqrt(squared)
from typing import Tuple
def neighbors(depths: list[list[int]], x: int, y: int) -> list[Tuple[int, int]]:
    """Return the in-bounds 4-neighbors of (x, y): left, right, up, down."""
    width, height = len(depths[0]), len(depths)
    candidates = ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1))
    return [(nx, ny) for nx, ny in candidates
            if 0 <= nx < width and 0 <= ny < height]
import re
def capitalise_chi(value):
    """Uppercase a leading 'chi.' prefix (any letter case) in a shelfmark."""
    # Only the very start of the string may match, per shelfmark convention.
    return re.sub(r'^[Cc][Hh][Ii]\.', 'CHI.', value)
def copy_doc(source):
    """Decorator that prepends *source*'s docstring to the decorated function's.

    Useful when overriding a method: the override keeps the original
    documentation, with its own docstring (if any) appended after it.

    Parameters
    ----------
    source : function
        Function whose docstring is copied.

    Returns
    -------
    function
        A decorator that rewrites the target function's __doc__.

    Raises
    ------
    ValueError
        If *source* has no docstring to copy.
    """
    def decorator(target):
        if not source.__doc__:
            raise ValueError('Cannot copy docstring: docstring was empty.')
        combined = source.__doc__
        if target.__doc__ is not None:
            combined = combined + target.__doc__
        target.__doc__ = combined
        return target
    return decorator
import string
def ishex(hexstr):
    """Return True if *hexstr* is a non-empty string of hexadecimal digits."""
    # all() over an empty string is vacuously True, so "" must be rejected
    # explicitly — an empty string is not a hex number.
    return bool(hexstr) and all(char in string.hexdigits for char in hexstr)
def build_ig_bio(part_of_day, sky_cond, sky_emoji, temp_feel):
    """Compose the Instagram bio text from time-of-day and weather parts."""
    lines = [
        "Back page of the internet.",
        "",
        f"I look up this {part_of_day} and the Toronto sky looks like {sky_cond}{sky_emoji}.",
        f"Kinda feels like {temp_feel}\u00b0C.",
    ]
    return "\n".join(lines)
def _get_num_to_fold(stretch: float, ngates: int) -> int:
"""Returns the number of gates to fold to achieve the desired (approximate)
stretch factor.
Args:
stretch: Floating point value to stretch the circuit by.
ngates: Number of gates in the circuit to stretch.
"""
return int(round(ngates * (stretch - 1.0) / 2.0)) | 71322e13f72111ba5af93c0011ef732ae5894771 | 686,407 |
import re
def lreplace(pattern, sub, string):
    """
    Substitute *sub* for *pattern* only when it matches at the start of *string*.
    """
    anchored = re.compile(f'^{pattern}')
    return anchored.sub(sub, string)
import math
def normal(x1: float, y1: float, x2: float, y2: float) -> tuple[float, float]:
    """
    Return the unit normal of the segment from (x1, y1) to (x2, y2).

    The normal direction is the segment's angle rotated by +90 degrees.
    """
    angle = math.atan2(y2 - y1, x2 - x1)
    rotated = angle + math.pi / 2
    return (math.cos(rotated), math.sin(rotated))
def test_banjo_player_name() -> str:
    """Prompt for a name and report whether that person plays banjo.

    Names starting with 'R' or 'r' play banjo.  An empty name terminates
    the program via exit() with an error message.
    """
    name = input('Are you playing banjo? Enter your name: ')
    if not name:
        exit("error: you doesn't entered name!")
    suffix = ' plays banjo' if name.startswith(('R', 'r')) else ' does not play banjo'
    return name + suffix
def motifScoreCmp(motifOcc1, motifOcc2):
    """Three-way comparison of two motif occurrences by p-value.

    Returns -1, 0 or 1 as motifOcc1's p-value is less than, equal to, or
    greater than motifOcc2's.
    """
    pval1 = motifOcc1.getPval()
    pval2 = motifOcc2.getPval()
    if pval1 < pval2:
        return -1
    if pval1 == pval2:
        return 0
    # Sanity check: with orderable p-values only "greater" remains.
    assert pval1 > pval2
    return 1
from typing import List
def get_requirements() -> List[str]:
    """Read requirements.txt from the current directory and return its lines."""
    with open('requirements.txt') as req_file:
        return req_file.read().splitlines()
def _proto_dataset_info(dataset):
"""Return information about proto dataset as a dict."""
# Analogous to dtool_info.inventory._dataset_info
info = {}
info['type'] = 'dtool-proto'
info['uri'] = dataset.uri
info['uuid'] = dataset.uuid
info["size_int"] = None
info["size_str"] = 'unknown'
info['creator'] = dataset._admin_metadata['creator_username']
info['name'] = dataset._admin_metadata['name']
info["date"] = 'not yet frozen'
info['readme_content'] = dataset.get_readme_content()
return info | 6072f5f9222f88ea1e33971036bd13d65754f39a | 686,420 |
def escape_cell(cell):
    """
    Escape table cell contents: backslashes, newlines and pipes.

    :param cell: Table cell (as unicode string).
    :return: Escaped cell (as unicode string).
    """
    # A single translate pass is equivalent to replacing '\' first and then
    # '\n' and '|' — each source character maps independently.
    return cell.translate(str.maketrans({u'\\': u'\\\\', u'\n': u'\\n', u'|': u'\\|'}))
def get_vm_ips(nm_client, resource_group, vm_name):
    """Return (private IP, public IP) for the named virtual machine.

    Scans the resource group's network interfaces for the first whose name
    contains *vm_name* and reads its first IP configuration.  Either tuple
    entry is None when the corresponding address is absent; (None, None) is
    returned when no matching interface exists.
    """
    for interface in nm_client.network_interfaces.list(resource_group):
        if vm_name not in interface.name:
            continue
        ip_config = interface.ip_configurations[0]
        public = ip_config.public_ip_address
        if public:
            public = public.ip_address
        return (ip_config.private_ip_address, public)
    return (None, None)
def interpolate_group(group, classes, params, group_names):
    """
    Resolve the ID references inside a Group dict to their readable forms:
    parameter IDs become {paramkey: paramvalue}, class IDs become
    {classname: classparams}, and parent/group IDs become group names.
    Mutates *group* in place and returns it.

    :param group: the Group dict returned by get_nm_group()
    :type group: dict
    :param classes: the dict of classes returned by get_nm_group_classes()
    :type classes: dict
    :param params: the dict of parameters returned by get_nm_group_params()
    :type params: dict
    :param group_names: the dict of group IDs to names returned by get_group_names()
    :type group_names: dict
    :returns: group dict, with classes and params interpolated
    :rtype: dict
    """
    group['parameters'] = {
        params[pid]['paramkey']: params[pid]['paramvalue']
        for pid in group.get('parameters', {})
    }
    group['classes'] = {
        classes[cid]['classname']: classes[cid]['classparams']
        for cid in group.get('classes', {})
    }
    group['parents'] = [group_names[pid] for pid in group.get('parents', {})]
    group['groups'] = [group_names[gid] for gid in group.get('groups', {})]
    return group
def iterize(obj):
    """
    Return an iterator over *obj*.

    Strings and non-iterable objects become one-element iterators; any
    other iterable is handed to iter() directly.

    >>> list(iterize((1, 2, 3)))
    [1, 2, 3]
    >>> list(iterize("Hello!"))
    ['Hello!']
    >>> list(iterize(42))
    [42]
    """
    if not isinstance(obj, str):
        try:
            return iter(obj)
        except TypeError:
            pass
    return iter((obj,))
import time
def to_timestamp(time_val):
    """Convert a datetime instance to a Unix timestamp.

    Uses time.mktime, so *time_val* is interpreted as local time.
    """
    struct = time_val.timetuple()
    return time.mktime(struct)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.