content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def fields(d, names, pred=lambda x: True,
key_transform=lambda x: x, value_transform=lambda x: x):
"""Returns the entries in this dictionary with keys appearing in names.
:type d: dict
:type names: [a]
:param pred: a filter that is applied to the values of the dictionary.
:type pred: (a ... | 19c39316bb39cdb72e45bb7c2e79d9cacbaffe3c | 679,218 |
def HarmonicPairConstraints(a, b, value, sd):
    """
    Return a properly formatted Rosetta constraint-file line declaring a
    harmonic distance constraint between the CA atoms of residues a and b,
    centered at distance "value" with standard deviation "sd".
    """
    return "AtomPair CA %i CA %i HARMONIC %3.1f %3.1f\n" % (a, b, value, sd)
# | 9cb284f171ba9f0f4b30bb02302d4b66ee4e13e2 | 679,219 |
def grab_data(h5file, path, col, matching=None):
"""Grabs data in a path matching parameters
Parameters
----------
h5file : PyTables HDF5 File handle
path : str
the path to the appropriate table
col : str
the target column name
matching : tuple, optional
a tuple ... | 3600ff5d6bf9613b8ffd521723e294ab843e7cbc | 679,221 |
def get_min(data_frame, field):
"""Calculates the minimum value of a given field in a given DataFrame.
Args:
data_frame (DataFrame): the Pandas DataFrame to analyze
field (string): the field to calculate the values of
Returns:
Series: the minimum value of the field
"""
retu... | 3f36fa254c491e8f4756b1f0c9a6bfb59bea9fbc | 679,225 |
import torch
def kinetic_energy_reg_func(x: torch.Tensor,
t: torch.Tensor,
dx: torch.Tensor,
unused_context) -> torch.Tensor:
"""
Quadratic cost / kinetic energy regularization: https://arxiv.org/pdf/2002.02798.pdf
:param... | 7c90e604e6e0a9a17f14738fe180914faf7e623f | 679,226 |
import re
def first_clean_up(lines):
"""
A function to remove line delimiters, comments, tabs, etc. from the
.sas dictionary file.
Parameters
----------
lines : list of strings; the raw file
"""
#Remove all tabs from the string
for i in range(0, len(lines)):
lines[i]... | 5d54cb69db6164bb8eb994d473cfa623e8bf337e | 679,228 |
def verify_request_target(replay_json, request_target):
"""
Verify that the 'url' element of the first transaction contains the request target.
"""
try:
url = replay_json['sessions'][0]['transactions'][0]['client-request']['url']
except KeyError:
print("The replay file did not have a... | 5be4e46d0a0d4ef744b35a2a1f1765c6950cd50d | 679,229 |
from typing import List
def gray_max(colors: List[int]) -> float:
    """
    Grayscale conversion that keeps the brightest channel.
    :param colors: [R, G, B] values
    :return: the single brightest channel value
    """
    brightest = max(colors)
    return brightest
# | afba2f9e40960c1e5abffba96c887f3eff9ca69e | 679,231 |
import pickle
def load(ensemble):
    """
    Load a previously pickled ensemble.

    :param ensemble: bytes produced by ``pickle.dumps``.
    :return: the unpickled ensemble object.
    """
    # NOTE(security): pickle.loads can execute arbitrary code during
    # deserialization; only call this on trusted data.
    return pickle.loads(ensemble) | 61af703ee0dc026299359bcbf67aa178109c6640 | 679,237 |
def empty_table(unique_database, request):
"""Create an empty table within the test database before executing test.
The table will have the same name as the test_function itself. Setup and teardown
of the database is handled by the unique_database fixture.
Args:
unique_database: pytest fixture defined in ... | 5bbb079be2e2c1db2e55843937ac129e7317b3e3 | 679,238 |
def fix_line_breaks(text):
    """Normalize Windows CRLF line breaks to Unix LF."""
    return "\n".join(text.split("\r\n"))
# | c4c698fce80d7c3820f689a163d0df19ea682573 | 679,240 |
def ignition_delay(states, species):
    """
    Ignition delay: the time at which the given species' mass-fraction
    trace peaks.
    """
    # argmax over the species' Y trace locates the concentration peak;
    # the matching entry of the time axis is the delay.
    peak_index = states(species).Y.argmax()
    return states.t[peak_index]
# | 5abcbebbff401cd5f6f9ab9634edcf344cbea41d | 679,256 |
def average_tweets_per_user(tweets, users_with_freq):
"""
Return the average number of tweets per user from a list of tweets.
:param tweets: the list of tweets.
:param users_with_freq: a Counter of usernames with the number of tweets in 'tweets' from each user.
:return: float. average number ... | f2fc5b725003b39a5e429a4945007fbb16640b54 | 679,261 |
import torch
def cross_entropy_soft_targets(predicted_distribution, target_distribution):
"""Cross entropy loss with soft targets.
B = batch size, D = dimension of target (num classes), N = ensemble size
Args:
inputs (torch.tensor((B, D - 1))): predicted distribution
soft_target (torch.te... | 4c142ae3ab9440bd5a5456ce70f6b4493cc89256 | 679,264 |
import math
def pwr_list_elements(in_list, populationSet=True):
"""
Return mean power of list elements (= mean**2 + std**2)
When population is True divide by len(in_list) to get the standard deviation of a complete set,
else divide by len(in_list)-1 to get the standard deviation for a sample of a... | 85a211a8f0837f0df355b7f3308eaaf50677ac25 | 679,266 |
import csv
def loadGrabbers(fname="grabbers.csv"):
"""
Read a CSV file and return contents as a list of dictionaries.
"""
columns = ["ID", "call", "title", "name", "loc", "site", "url"]
grabbers = []
with open(fname) as f:
reader = csv.reader(f, delimiter=",", quotechar='"')
fo... | e0472382ec4547bc53ab5d3c2d8aa3cc48ba64d3 | 679,268 |
def make_car(manufacturer, model, **car_info):
    """Build a dictionary containing information about a car."""
    # Extra keyword arguments are kept as-is; the two required fields are
    # added under fixed keys.
    details = dict(car_info)
    details['manufacturer_name'] = manufacturer
    details['model_name'] = model
    return details
# | 01db04acdbfea8d4604d82ee6246f18afc147b51 | 679,269 |
from typing import List
def split_into_lists(input_list: List, target_number_of_lists: int) -> List[List]:
"""
Evenly splits list into n lists.
E.g split_into_lists([1,2,3,4], 4) returns [[1], [2], [3], [4]].
:param input_list: object to split
:param target_number_of_lists: how many lists to spli... | daf9bcad6d86d3654c36bca15f1c9943acb21159 | 679,270 |
def get_job(api, job_id):
    """Get a Borgy job.

    :param api: Borgy API client exposing ``v1_jobs_job_id_get``.
    :param job_id: identifier of the job to fetch.
    :return: whatever the client returns for the job lookup.
    """
    return api.v1_jobs_job_id_get(job_id) | 70b537d6067417479e33d4d9b38f4291af6c1ccb | 679,271 |
import requests
def get_json_from_query(location):
    """Search metaweather for a city and return the first match's metadata.

    NOTE(review): raises IndexError when the query matches no location.
    """
    endpoint = f"https://www.metaweather.com/api/location/search/?query={location}"
    results = requests.get(endpoint).json()
    return results[0]
# | 281c7d8f3f3b6bb92bf4bdb5d76dd3f3c4499143 | 679,272 |
def is_punkt(token):
"""
Return if token consists of only punctuation and whitespace
Args:
token: single token
Returns:
Boolean
Raises:
None
Examples:
>>> is_punkt(" ")
True
>>> is_punkt(", ,")
True
>>> is_punkt("?!!")
True
... | 07da56563f7a11a3c6899dd6f3f4bc941338bff4 | 679,274 |
def movie_sort(movies):
"""Sorts a list of movies by their release date using bubble sort.
Args:
movies: a list of movies.
Returns:
A sorted list of movies.
"""
sorted_movies = movies.copy()
swapped = True
while swapped:
swapped = False
for i in range(1, len... | ef1399f7af77ad9ef8dc3e5a4d4c2b0d93c119ff | 679,278 |
def xor_hex_strings(bytes_a, bytes_b):
# type: (str, str) -> str
"""Given two hex strings of equal length, return a hex string with
the bitwise xor of the two hex strings."""
assert len(bytes_a) == len(bytes_b)
return ''.join(["%x" % (int(x, 16) ^ int(y, 16))
for x, y in zip(byte... | 7031b25da743c0a9b4de517c6c4d24606345ee8e | 679,280 |
import re
import json
def read_job_properties(jobscript,
prefix="# properties",
pattern=re.compile("# properties = (.*)")):
"""Read the job properties defined in a snakemake jobscript.
This function is a helper for writing custom wrappers for the
snakemake ... | ea81eec9dfc9bbfe65c19c5f3169e7568300aecb | 679,282 |
def solve(input):
    """Solve the puzzle: fuel required for a module of the given mass."""
    # int() truncation of mass/3, minus 2, matching the original formula.
    thirds = int(input / 3)
    return thirds - 2
# | 6a4c7fea5d17a4ab736bab850b108dcf13111401 | 679,284 |
def Dic_Extract_By_Subkeylist(indic,keylist):
"""
Return a new dic by extracting the key/value paris present in keylist
"""
outdic={}
for key in keylist:
try:
outdic[key]=indic[key]
except KeyError:
raise KeyError("input key {0} not present!".format(key))
... | a0ac2c9ee28de9fc8cabfb05f37b57009841b03b | 679,290 |
def canonicalize_tensor_name(name):
"""Canonicalizes tensor names.
For an op that produces only one output, we may be refer to its output tensor
as either "op_name:0" or simply "op_name". This standardizes all internal
names as "op_name:0" to simplify the logic.
Args:
name: Input name to canonicalize.
... | 5f32572372d9ad6a69f7f9991d2cd8beae3f3d07 | 679,295 |
def _get_header(request_type):
"""Returns header str for talking with cosmos
:param request_type: name of specified request (ie uninstall-request)
:type request_type: str
:returns: header information
:rtype: str
"""
return ("application/vnd.dcos.package.{}+json;"
"charset=utf-8... | 9cb0f296455b78ef522a2797d7a07046853d11c8 | 679,296 |
def strip_html_comments(text):
    """Strip HTML comments from a unicode string.

    Only whole lines that start with "<!--" are dropped (the special form
    this codebase allows); line endings are preserved.
    """
    kept = [line for line in text.splitlines(True) if not line.startswith("<!--")]
    return "".join(kept)
# | 289ab694a1fa2a6c9a1f60e0ead8b13e62a0bff0 | 679,297 |
import torch
def matrix_to_cartesian(batch: torch.Tensor, keep_square: bool = False) -> torch.Tensor:
"""
Transforms a matrix for a homogeneous transformation back to cartesian
coordinates.
Args:
batch: the batch oif matrices to convert back
keep_square: if False: returns a NDIM x NDI... | 3147a7d04b01f36a42b385de45676fa1ddad4581 | 679,298 |
import requests
import ssl
import base64
def get_digest_base64(location):
"""Download the sha256sum.txt message digest file at the given
`location`.
:return: A `string` of the base64-encoded message digest
"""
res = requests.get(location,
verify=ssl.get_default_verify_paths().opens... | 3b54eb3fe3099892bf1bcd0d5c3dc6891951feeb | 679,303 |
def _is_inline_update(job):
"""This returns true if the job contains an inline update"""
if not job.metadata.get('update'):
return False
return job.metadata["update"].inline_query | 67e99413dfeb3aee28c13458cb7f7769155cb77b | 679,305 |
def _note(item):
"""Handle secure note entries
Returns: title, username, password, url, notes
"""
return f"{item['name']} - Secure Note", "", "", "", item.get('notes', '') or "" | 4bb92b4de36842753c650b77196bb48629b73246 | 679,307 |
import unicodedata
def remove_accentuated(s):
    """Lower-case *s* and strip accents (Unicode combining marks)."""
    # NFD splits accented letters into base char + combining mark ('Mn');
    # dropping the marks leaves the plain letters.
    decomposed = unicodedata.normalize('NFD', s.lower())
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
# | 7d17863f3ead81ff4839f897abe6f9f69ae3a6ad | 679,308 |
def get_filters(filters):
"""Return the rsync options for the given filters."""
arguments = []
for filter_ in filters:
if len(filter_) > 1:
raise Exception(
"Filter must contain only one entry: {}".format(filter_))
if "exclude" in filter_:
argumen... | 7704c7fe440fe93c392a8a1d37d31f336eb3e5b0 | 679,309 |
def _replace_words(replacements, string):
"""Replace words with corresponding values in replacements dict.
Words must be separated by spaces or newlines.
"""
output_lines = []
for line in string.split('\n'):
output_words = []
for word in line.split(' '):
new_word = repla... | db7afda0aeece6d40a2f6f89c4de52afd68ca87e | 679,310 |
def continue_crawl(search_history, target_url, max_steps = 25):
"""
Determines whether or not we should keep crawling
search_history: is a list of strings which are urls of Wikipedia
articles. The last item in the list is the most
recently found url.
... | 1d7aef1ac18e186c4b60b317386d51490b01470a | 679,311 |
def odd(number):
    """ Returns True if number is odd """
    # Python's % with a positive divisor is non-negative, so this also
    # holds for negative odd numbers.
    remainder = number % 2
    return remainder == 1
# | 5f8030f59f48b0c5662006a7facfe6f4c174a743 | 679,317 |
def reverse(current_block, *args):
    """Reverses the data of the current block (extra args are ignored)."""
    flipped = current_block[::-1]
    return flipped
# | 1e7c2529123fd97916120ed53b46f819642ea586 | 679,318 |
from typing import Optional
def frame_index_to_seconds(
frame_index: int, fps: int, zero_indexed: Optional[bool] = True
) -> float:
"""Converts a frame index within a video clip to the corresponding
point in time (in seconds) within the video, based on a specified frame rate.
Args:
frame_inde... | 5cceab326ee446825f758b1d717d41775c769c10 | 679,319 |
def escape_special(v):
    """ Escape literal bools and None as strings.

    Identity checks are used so truthy/falsy equal values (1, 0, etc.)
    are NOT converted.
    """
    for literal in (True, False, None):
        if v is literal:
            return str(v)
    return v
# | 72666184a65fbdf8aaa4371945e127128729db39 | 679,320 |
def tuple_set(base, values, indices):
"""
Creates a new tuple with the given values put at indices and otherwise the same as base. The
list of indices must be in sorted order.
"""
new = base[:indices[0]]
for i in range(len(indices)-1):
new += (values[i],) + base[indices[i]+1:indices[i+1]... | 6f594cf189d71754e024f4770ad516f889ad7921 | 679,322 |
def alter_context(context):
    """ Modify the context in place (adds key 'ADD') and return it """
    context.update(ADD='127')
    return context
# | 286e18ae53c1849fb0fcd4231cd0e125d2ecc2ee | 679,323 |
def get_all_context_names(context_num):
"""Based on the nucleotide base context number, return
a list of strings representing each context.
Parameters
----------
context_num : int
number representing the amount of nucleotide base context to use.
Returns
-------
a list of st... | 94de6fbad73d25ef242b4e2f6b0f378baa17aaf4 | 679,326 |
def is_valid_ip(ip):
    """
    Check whether *ip* is a valid dotted-quad IPv4 address.
    :param ip: IP address string, e.g. "192.168.0.1"
    :return: True if valid, else False

    Fix: non-numeric octets (e.g. "a.b.c.d") previously escaped as a
    ValueError from int(); a validator should return False instead.
    """
    parts = ip.split(".")
    if len(parts) != 4:
        return False
    try:
        return all(0 <= int(part) <= 255 for part in parts)
    except ValueError:
        return False
# | 35ae6f11928cabacb61611526c3cff26b179c1a4 | 679,329 |
def split_rows_by_condition(df, mask):
"""Split dataframe based on logical indexes (that could come from a condition).
Args:
df (pd.DataFrame): Dataframe.
mask (pd.Series): Series with boolean indexes (could come from a condition).
Returns:
list: List of split dataframes.
... | 6241207ae59c76d2105af3dd90c8673b8c8ba166 | 679,330 |
def celcius(temperature):
    """Converts a temperature from degrees Kelvin to degrees Celcius."""
    KELVIN_OFFSET = 273.15
    return temperature - KELVIN_OFFSET
# | 71dd3704ecba33cfec39e7b62bdc9b7b8ef7160d | 679,332 |
def mplt_bars(ax, ticks, values, colors, ylabel=None, title=None):
"""Quick function for creating stacked matplotlib barplot"""
bar0 = ax.bar(ticks, values[0], color=colors[0])
bar1 = ax.bar(ticks, values[1], bottom=values[0], color=colors[1])
if ylabel is not None:
ax.set_ylabel(ylabel)
if ... | 2ab89435eee20aeedbaf7232fc5fc297a26e4e48 | 679,336 |
def _tag_tuple(revision_string):
"""convert a revision number or branch number into a tuple of integers"""
if revision_string:
t = [int(x) for x in revision_string.split('.')]
l = len(t)
if l == 1:
return ()
if l > 2 and t[-2] == 0 and l % 2 == 0:
del t[-2]
return tuple(t)
return (... | e6f1b6111dd5e95ba9009e6593d580afa7203c0b | 679,337 |
def indentLevel(line):
    """Returns the indentation level of a line, defined in Piklisp as the number of leading tabs.

    Returns None when the line is empty or consists only of tabs.
    """
    for position, char in enumerate(line):
        if char != "\t":
            return position
    return None
# | b95c1adf499336ef33e68bfc89daf2b34d2013da | 679,339 |
def marker_cell_identifier(marker_region, cells):
    """Return cell identifier of marker region.

    :param marker_region: region exposing ``convex_hull.centroid``
        (presumably an image-analysis region object -- confirm with caller)
    :param cells: segmentation/mapping indexable by a centroid position
    :return: the cell identifier stored at the marker's centroid
    """
    pos = marker_region.convex_hull.centroid
    return cells[pos] | dae7848f2dd99e942925fdfda625a445912a065b | 679,341 |
def get_synapse_data_by_contin(cur,contin):
"""
Returns synapse data for given contin
Each row is a single section of the synapse
Row format: [section_number,preobj,[post_obj]]
Parameters:
-----------
cur : MySQLdb cursor
contin : str
Contin number
"""
sql = ("select IMG... | 206feed563ab32324cbbe48e6c69389f811c741c | 679,342 |
def add_id_to_dict(doc):
    """ Adds the document's id to the document's fields dictionary.

    :param doc: document exposing ``to_dict()`` and an ``id`` attribute.
    :return: the fields dict with the id stored under 'id'.
    """
    return {**doc.to_dict(), 'id': doc.id}
# | 3626ee822817fde648e46fcd6862dc689cc20c5a | 679,346 |
def s_to_b(s: str) -> bytes:
    """UTF-8 encode a string into bytes.

    :param s: input string
    :return: the encoded bytes
    """
    return s.encode('utf8')
# | 8affe4850d40754e2c9338e270c9813edad2797b | 679,347 |
def match_to_int(match):
"""Returns trace line number matches as integers for sorting.
Maps other matches to negative integers.
"""
# Hard coded string are necessary since each trace must have the address
# accessed, which is printed before trace lines.
if match == "use-after-poison" or match ==... | c2daab64bc4a2ae258b7ac6152a949b48d8d7906 | 679,348 |
def to_pascal_case(string):
"""Convert from snake_case to PascalCase
Using the standard library `title` doesn't help as it changes everything after the first letter to lowercase, we
want the following:
- API -> API
- api_key -> ApiKey
- user -> User
- paginated_response(account) -> Paginate... | 0ef2b2f7aeb7f550026df25cd060a5313a3017a5 | 679,350 |
def related_to_hardware(cpes):
"""
Return True if the CVE item is related to hardware.
"""
for cpe in cpes:
cpe_comps = cpe.split(":")
# CPE follow the format cpe:cpe_version:product_type:vendor:product
if len(cpe_comps) > 2 and cpe_comps[2] == "h":
return True
r... | 7072255239be18589ff2d9abcb0aca721478c5f5 | 679,351 |
def map_range(x, in_min, in_max, out_min, out_max):
    """Map Value from one range to another."""
    # Linear interpolation: offset within the input span, scaled onto the
    # output span, then shifted to the output origin.
    offset = x - in_min
    return offset * (out_max - out_min) / (in_max - in_min) + out_min
# | aa1765d29eaa9527cb03733b3b2e7548ccdf1fec | 679,354 |
def compare_dicts(file, src_test_dict, infer_dict):
"""
Check if a particular file
exists in the source/test dict and infer dict
If file exists, decrement the counter in
both dictionaries
Args:
file: file potentially not analyzed by infer
src_test_dict: dictionary contain... | e8ca69d85cfb18be8e92ed6c39eeca46e71a0598 | 679,355 |
def parse_requirements(fname):
    """Read requirements from a pip-compatible requirements file.

    :param fname: path to the requirements file.
    :return: list of stripped, non-empty, non-comment lines.

    Fix: the original opened the file twice (``with open(fname):`` plus a
    second ``open(fname)`` inside the generator) and leaked the second
    handle; a single context manager now owns the file for the whole read.
    """
    with open(fname) as fh:
        stripped = (line.strip() for line in fh)
        return [line for line in stripped if line and not line.startswith("#")]
# | 9b27d34324b5ea304ff5c66289193640a385f393 | 679,356 |
import math
def actual_pressure(temperature, pressure, height=0.0):
"""
Convert the pressure from absolute pressure into sea-level adjusted
atmospheric pressure.
Uses the barometric formula.
Returns the mean sea-level pressure values in hPa.
"""
temperature = temperature + 273.15
press... | 5743ee84d007f399ff706bf1a4c022cda50840d0 | 679,357 |
import re
def get_emails(s):
"""Returns first matched email found in string s."""
# Removing lines that start with '//' because the regular expression
# mistakenly matches patterns like 'http://foo@bar.com' as '//foo@bar.com'.
# Adopted from code by Dennis Ideler ideler.dennis@gmail.com
regex = re... | 624aa54b0fe7f118d7c9b1101314605c80ac7080 | 679,360 |
def update_counter(galini, name, amount, initial_value=0.0):
"""Update a counter, creating the gauge if it does not exists."""
telemetry = galini.telemetry
counter = telemetry.get_counter(name)
if counter is None:
counter = telemetry.create_counter(name, initial_value)
return counter.increme... | a0f325e798b84362596fc6662b295ad0ee378d22 | 679,364 |
def optional(cls):
"""
Returns a Converter for a type which can optionally be missing (represented by None).
"""
def converter(string_or_instance):
if string_or_instance is None or isinstance(string_or_instance, cls):
return string_or_instance
if string_or_instance.strip() ... | c76dd740a74d7aa0ef2dabd290399f0c2cd65825 | 679,366 |
def extract_name(in_line: str) -> str:
"""
Extracts the name from the type construct. Information that is not needed will be removed
such as the stereotype.
"""
types = {'package', 'class', 'abstract', 'interface',
'enum', 'abstract class', 'entity'}
process_type: str = ''
for i... | ff115509c58a83df139a83cdc0473c2841c7dc5a | 679,367 |
def _info_from_first_line(line):
"""
Gets the info from the first line of landmarks' txt. The format of the file
is hardcoded for now, e.g. the expected numbers and fields.
It returns a dictionary, which enables future extensions of what is returned.
Along with the functions _from_line_to_vec, from... | b38f19b0bfcca7de6661e1ea929a5ef8017837ad | 679,374 |
def database_test_url() -> str:
    """
    Build the shared in-memory sqlite connection URL used for tests.
    :return: url string for test database connection
    """
    url = "sqlite+aiosqlite://?cache=shared"
    return url
# | 6aab90ad759ee632ce83e5f110f6e4decaf13aa9 | 679,376 |
def get_longest_repeating_substring(input_string: str) -> str:
"""
Algorithm for getting the longest repeating substring from a string
Complexity --> O(N)
:param input_string: str
:return longest_substring: str
"""
longest_substring = ""
local_longest_substring = input_string[0]
... | dba6251b500d7d1cbe76af4052dd183c1769f579 | 679,388 |
def count_mol_weights(mol_weights):
"""
Count the number of weights in each bin, and
return a list of counts.
"""
counts = [0 for _ in range(9)]
for mol_weight in mol_weights:
if mol_weight < 250:
counts[0] += 1
elif 250 <= mol_weight < 300:
counts[1... | 59e1e79aba9a07843091364c34b56e59a5a69106 | 679,389 |
import bisect
def bottom_values(values, period=None, num=1):
"""Returns list of bottom num items.
:param values: list of values to iterate and compute stat.
:param period: (optional) # of values included in computation.
* None - includes all values in computation.
:param num: the num in the b... | 7eb80b58ed2cf7c7c005417414aedc66c046f6c9 | 679,390 |
def safemax(data, default=0):
""" Return maximum of array with default value for empty array
Args:
data (array or list ): data to return the maximum
default (obj): default value
Returns:
object: maximum value in the array or the default value
"""
if isinstance(data, list):
... | 1d5a2ea3a6a498cb7be03ec54c3bffef75903444 | 679,391 |
import tqdm
def _pbar(x):
    """Create a tqdm progress bar.

    :param x: iterable to wrap.
    :return: the wrapped iterable; ASCII rendering is forced and progress
        is reported in " scans" units.
    """
    return tqdm.tqdm(x, ascii=True, unit=" scans") | 182ec56e05a5b7f845320add429e01065f952fcd | 679,394 |
def is_yaml(file_path: str) -> bool:
    """Returns True if file_path is YAML, else False
    Args:
        file_path: Path to YAML file.
    Returns:
        True if is yaml, else False.
    """
    # str.endswith accepts a tuple of suffixes, replacing the duplicated
    # boolean chain; behavior is unchanged.
    return file_path.endswith(("yaml", "yml"))
# | a466e019aa2f59adb3412d2424c1e7e6bb8b2317 | 679,395 |
import hashlib
def sha256_file(filename):
"""Calculate sha256 hash of file
"""
buf_size = 65536 # lets read stuff in 64kb chunks!
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
while True:
data = f.read(buf_size)
if not data:
break
... | dd5da4635281d1742cf864e39eea831920da4359 | 679,398 |
import re
def extract_thread_list(trace_data):
"""Removes the thread list from the given trace data.
Args:
trace_data: The raw trace data (before decompression).
Returns:
A tuple containing the trace data and a map of thread ids to thread names.
"""
threads = {}
parts = re.split('USER +PID +PPID ... | 1e297e65bf33b733fe6453a62e124449e8a6af11 | 679,401 |
def getDuration(timesarr):
    """
    Span of time covered by the input time array.
    :param timesarr: array of times as returned by readCSV
    :returns: difference between the latest and earliest times
    """
    earliest = min(timesarr)
    latest = max(timesarr)
    return latest - earliest
# | 379e236713902d506856296734b64fde0623aa17 | 679,404 |
def landsat_ts_norm_diff(collection, bands=['Green', 'SWIR1'], threshold=0):
"""Computes a normalized difference index based on a Landsat timeseries.
Args:
collection (ee.ImageCollection): A Landsat timeseries.
bands (list, optional): The bands to use for computing normalized difference. Defaul... | a9026927a69a9c7b0a5551e9f9d71799f50d9115 | 679,410 |
import json
def fromJSON(json_file):
    """Load a JSON file (utf8 encoded) and return the parsed object."""
    path = str(json_file)
    with open(path, "r", encoding='utf8') as fp:
        return json.load(fp)
# | f18a272387609a5eb7f6eb7a04248f55c2466db0 | 679,412 |
def get_node_attributes(G, name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],col... | fe1a559179e11feeee532ad7e2c737f40190169e | 679,415 |
def calc_downsample(w, h, target=400):
    """Calculate downsampling value."""
    # Both original branches reduce to the same expression: the smaller
    # dimension divided by the target size.
    return min(w, h) / target
# | c0c8f5296b03750bdde25b0bcc0350d69017b0b6 | 679,417 |
import re
def camel_case_split(text: str) -> list:
"""camel_case_split splits strings if they're in CamelCase and need to be not Camel Case.
Args:
str (str): The target string to be split.
Returns:
list: A list of the words split up on account of being Camel Cased.
"""
return re.... | bcbac4f7ba01d133b84fb1275743bea278d64c4c | 679,420 |
def hex_to_bytes(data):
    """Decode a hexadecimal string into its raw bytes."""
    decoded = bytes.fromhex(data)
    return decoded
# | aa9f7d5c6d66bccc5fb5bf7ba51bcc075b8580bc | 679,421 |
import textwrap
def multiline_fix(s):
    """Remove indentation from a multi-line string."""
    dedented = textwrap.dedent(s)
    return dedented.lstrip()
# | 9cb964eb88ebcaadc00acc222d174d6a320c44cf | 679,426 |
def oc2_pair(actuator_nsid, action_name, target_name):
"""Decorator for your Consumer/Actuator functions.
Use on functions you implement when inheriting
from OpenC2CmdDispatchBase.
Example:
class MyDispatch(OOpenC2CmdDispatchBase):
...
@oc2_pair('slpf', 'deny', 'ipv4_connection')
... | 211c8e22cb052a0117f161f4b90265744023a86c | 679,428 |
def round_next(x, step):
    """Rounds x up or down to the next multiple of step.

    A step of 0 returns x unchanged.
    """
    if step == 0:
        return x
    multiples = round(x / step)
    return multiples * step
# | 212687037567b37f6f62a9dbc3427fe05045476f | 679,432 |
def trim_seq(seq):
    """
    Upper-case the sequence and trim non-ATCG characters (Ns etc.) from
    both ends; interior non-ATCG characters are kept.
    """
    valid = {'A', 'T', 'C', 'G'}
    s = seq.upper()
    start = 0
    while start < len(s) and s[start] not in valid:
        start += 1
    end = len(s) - 1
    while end >= 0 and s[end] not in valid:
        end -= 1
    return s[start:end + 1]
# | d9fc891c9bef536ad94bcd58eafa212dc40387c8 | 679,434 |
def _extract_patch(img_b, coord, patch_size):
""" Extract a single patch """
x_start = int(coord[0])
x_end = x_start + int(patch_size[0])
y_start = int(coord[1])
y_end = y_start + int(patch_size[1])
patch = img_b[:, x_start:x_end, y_start:y_end]
return patch | 594985f32f366a2613302611868efd7a75582d6c | 679,435 |
def do_sql(conn, sql, *vals):
    """
    Execute *sql* on *conn*, binding *vals* as positional query parameters.
    """
    params = vals
    return conn.execute(sql, params)
# | 1c14cc6e411cda5080bed7ef51e001b4eb1f6d02 | 679,439 |
from typing import Iterable
def sigma(a: Iterable[int]) -> int:
    """Sum of the ints in the given iterable."""
    total = sum(a)
    return total
# | e9e55dd4968cd6c94ab62c5fb1be97687e5c9571 | 679,442 |
def merge(d1, d2):
    """
    Merge two dictionaries. In case of duplicate keys, this function will return values from the 2nd dictionary
    :return dict: the merged dictionary
    """
    # Falsy inputs (None, {}) are skipped, matching the original guards.
    merged = {}
    for source in (d1, d2):
        if source:
            merged.update(source)
    return merged
# | 0cedda124ab6ee310ee3df20784cd05d468b0612 | 679,447 |
def obs_to_dict(obs):
    """
    Wrap an observation in a dict; dict inputs pass through unchanged.

    Non-dict observations are stored under the key ``None``.
    """
    return obs if isinstance(obs, dict) else {None: obs}
# | 0cedf58ce849b3663ac0f9b2c6036ced09815d5d | 679,449 |
import math
def convert_state_to_hex(state: str) -> str:
"""
This assumes that state only has "x"s and Us or Ls or Fs or Rs or Bs or Ds
>>> convert_state_to_hex("xxxU")
'1'
>>> convert_state_to_hex("UxUx")
'a'
>>> convert_state_to_hex("UUxUx")
'1a'
"""
state = (
stat... | d722e07ab69c6b46f834eca04c7a8ba75520b145 | 679,457 |
def _lat_hemisphere(latitude):
"""Return the hemisphere (N, S or '' for 0) for the given latitude."""
if latitude > 0:
hemisphere = 'N'
elif latitude < 0:
hemisphere = 'S'
else:
hemisphere = ''
return hemisphere | 17befd8edcedb25575dabe0e5b950debafcdf1ca | 679,466 |
from typing import List
from typing import Tuple
import operator
def _get_longest_palindrome_boundaries(lps_table: List[int]) -> Tuple[int, int]:
"""Returns real text longest palindrome boundaries based from its lps table.
"""
center_index, radius = max(enumerate(lps_table), key=operator.itemgetter(1))
... | 7c563ee7dda45a85ad3f001b56bd5ed36d241f2c | 679,469 |
def parse_word(word):
"""
Split given attribute word to key, value pair.
Values are casted to python equivalents.
:param word: API word.
:returns: Key, value pair.
"""
mapping = {'yes': True, 'true': True, 'no': False, 'false': False}
_, key, value = word.split('=', 2)
try:
... | 89a845c5dbc1f64c2d7c65145535f5b62b06b940 | 679,471 |
def merge_sort(nsl: list) -> list:
"""
function sorts an array by a merge method.
:param nsl: type list: non sorted list
:return: type list: list after merge sort
"""
sl = nsl[:]
n = len(nsl)
if n < 2:
return sl
else:
left_arr = merge_sort(nsl=nsl[:n//2])
rig... | bc6aacda515ac1b8509e1db29974a003b7739903 | 679,472 |
import json
def read_json(filename):
    """
    Deserializes json formatted string from filename (text file) as a dictionary.
    :param filename: string
    """
    with open(filename) as fh:
        return json.load(fh)
# | baec461293e45613126fa29648f4c8a83b49e719 | 679,475 |
def redshift2dist(z, cosmology):
""" Convert redshift to comoving distance in units Mpc/h.
Parameters
----------
z : float array like
cosmology : astropy.cosmology instance
Returns
-------
float array like of comoving distances
"""
return cosmology.comoving_distance(z).to('Mpc'... | 105f665ac6046692c3ec4652201bce63246dd6a9 | 679,476 |
def first_half(dayinput):
    """
    first half solver:
    Starting with a frequency of zero, what is the resulting
    frequency after all of the changes in frequency have been applied?
    """
    # Each line is one signed frequency change; the answer is their sum.
    return sum(int(change) for change in dayinput.split('\n'))
# | 3ae205a8347086e9d411acdf8b72a1ce0b390655 | 679,479 |
def reorder(x, indexList=[], indexDict={}):
"""
Reorder a list based upon a list of positional indices and/or a dictionary of fromIndex:toIndex.
>>> l = ['zero', 'one', 'two', 'three', 'four', 'five', 'six']
>>> reorder( l, [1, 4] ) # based on positional indices: 0-->1, 1-->4
['one', 'f... | 9649c5392657fe6ab329fbb1b829ffc3bae19543 | 679,481 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.