content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def calc_plan(plan, beam_set, norm):
    """Calculate and normalize treatment plan.
    Parameters
    ----------
    plan : connect.connect_cpython.PyScriptObject
        Current treatment plan.
    beam_set : connect.connect_cpython.PyScriptObject
        Current beam set.
    norm : (str, float, float)
        Region of interest, dose, and volume used for normalization.
    Returns
    -------
    int
        RayStation exit status:
        - 0: success
        - 1: normalization failed
        - 2: optimization failed
    """
    # Calculate plan. Catch Exception (not a bare except) so that
    # SystemExit/KeyboardInterrupt still propagate to the caller.
    plan.PlanOptimizations[0].ResetOptimization()
    try:
        plan.PlanOptimizations[0].RunOptimization()
    except Exception:
        return 2
    # Normalize plan to the prescription point described by ``norm``.
    try:
        beam_set.NormalizeToPrescription(
            RoiName=norm[0],
            DoseValue=norm[1],
            DoseVolume=norm[2],
            PrescriptionType='DoseAtVolume')
        return 0
    except Exception:
        return 1
import hashlib
def sha256sum(body):
    """Get a SHA256 digest from a string.

    Accepts ``bytes`` or ``str``; a ``str`` is UTF-8 encoded first
    (the previous version raised TypeError on ``str`` input even though
    the docstring promised string support).  A falsy body yields the
    digest of the empty message.
    """
    h = hashlib.sha256()
    if body:
        if isinstance(body, str):
            body = body.encode('utf-8')
        h.update(body)
    return h.hexdigest()
from typing import Optional
from typing import List
import pkg_resources
def get_version(
    package_name: str,
    raise_on_error: bool,
    alternate_package_names: Optional[List[str]] = None,
) -> Optional[str]:
    """
    :param package_name: The name of the full package, as it would be imported,
        to get the version for
    :type package_name: str
    :param raise_on_error: True to raise an error if package is not installed
        or couldn't be imported, False to return None
    :type raise_on_error: bool
    :param alternate_package_names: List of alternate names to look for the package
        under if package_name is not found. Useful for nightly builds.
    :type alternate_package_names: Optional[List[str]]
    :return: the version of the desired package if detected, otherwise raises an error
    :rtype: str
    """
    current_version: Optional[str] = None
    version_err = None
    try:
        current_version = pkg_resources.get_distribution(package_name).version
    except Exception as err:
        version_err = err

    if version_err and alternate_package_names:
        # Work on a copy so the caller's list is not mutated by pop()
        # (the previous version destructively popped the caller's list).
        remaining = list(alternate_package_names)
        next_package = remaining.pop()
        return get_version(next_package, raise_on_error, remaining)

    if version_err and raise_on_error:
        raise ImportError(
            f"error while getting current version for {package_name}: {version_err}"
        )

    return current_version if not version_err else None
def get_simulation(open_rocket_helper, ork_file_path, i_simulation):
    """Return the simulation with the given index from the .ork file.
    :arg open_rocket_helper:
        Instance of ``orhelper.Helper()``
    :arg ork_file_path:
        Path of the OpenRocket document to load.
    :arg i_simulation:
        Zero-based index of the simulation to fetch.
    :raise IndexError:
        If `i_simulation` is negative or >= the number of simulations in
        the given .ork file
    """
    doc = open_rocket_helper.load_doc(ork_file_path)
    n_simulations = doc.getSimulationCount()
    # Validate the index before asking the document for the simulation.
    if i_simulation < 0 or i_simulation >= n_simulations:
        raise IndexError(
            "Simulation index is out of bounds!\n"
            + "i_simulation = {}\n".format(i_simulation)
            + "n_simulations = {}\n".format(doc.getSimulationCount()))
    sim = doc.getSimulation(i_simulation)
    # Informational log of which simulation was selected.
    print("Load simulation number {} called {}.".format(
        i_simulation, sim.getName()))
    return sim
def within_time_period(t, time_period):
    """
    Check if time is in the time period (inclusive at both ends).
    Argument:
        t = given time in format (half, min, sec)
        time_period = tuple of (start_time, end_time) in format (half, min, sec)
    Return:
        boolean
    """
    start, end = time_period
    assert start[0] == end[0], "TIME PERIOD NOT IN THE SAME HALF !"

    def seconds_into_half(moment):
        # Collapse (min, sec) into a single comparable second count.
        return moment[1] * 60 + moment[2]

    if t[0] != start[0]:
        return False
    return seconds_into_half(start) <= seconds_into_half(t) <= seconds_into_half(end)
def compute_agreement_score(arcs1, arcs2, method, average):
"""Agreement score between two dependency structures
Parameters
----------
arcs1: list[(int, int, str)]
arcs2: list[(int, int, str)]
method: str
average: bool
Returns
-------
float
"""
assert len(arcs1) == len(arcs2)
if method == "joint":
shared_arcs = set(arcs1) & set(arcs2)
score = float(len(shared_arcs))
elif method == "independent":
score = 0.0
dep2head1 = {d: (h, r) for h, d, r in arcs1}
dep2head2 = {d: (h, r) for h, d, r in arcs2}
for dep in dep2head1.keys():
head1, rel1 = dep2head1[dep]
head2, rel2 = dep2head2[dep]
if head1 == head2:
score += 0.5
if rel1 == rel2:
score += 0.5
else:
raise Exception("Never occur.")
if average:
score = float(score) / len(arcs1)
return score | a2f19101ea246ccd317d7519214d103842b83d33 | 687,213 |
def read_image(file_path):
    """
    Return the raw bytes of the file at *file_path*.
    """
    with open(file_path, "rb") as handle:
        data = handle.read()
    return data
def manhattan_distance(params):
    """
    Negated Manhattan distance to the target, with a +100 bonus on arrival.
    Args:
        params (tuple): (current, target, solution) where current and
            target are (x, y) coordinates and solution is falsy when
            no path exists.
    Returns:
        (float): -100 when there is no solution; otherwise the negated
        Manhattan distance plus 100 if the target has been reached.
    """
    current, target, solution = params
    if not solution:
        return -100
    gap = abs(target[0] - current[0]) + abs(target[1] - current[1])
    bonus = 100 if gap == 0 else 0
    return bonus - gap
def row_normalize(x):
    """
    Scale a matrix so that every row sums to one.
    """
    row_sums = x.sum(axis=1)
    return x / row_sums[:, None]
import re
def error_has_gloss(value):
    """Checks if the value has at least a hyphen followed by two capital letters"""
    gloss_pattern = re.compile(r'.*\-[A-Z]{2,}')
    return gloss_pattern.match(value) is not None
def argmin(arr, f):
    """Return the index, i, in arr that minimizes f(arr[i]).

    ``None`` entries are skipped; returns None for an empty or all-None
    ``arr``.  Evaluates ``f`` exactly once per non-None element (the
    previous version called it twice whenever a new minimum was found).
    """
    best_val = None
    best_idx = None
    for idx, item in enumerate(arr):
        if item is None:
            continue
        val = f(item)
        if best_val is None or val < best_val:
            best_val = val
            best_idx = idx
    return best_idx
def divide_toward_zero(x, y):
    """Divides `x` by `y`, rounding the result towards zero.
    The division is performed without any floating point calculations.
    For exmaple:
        divide_toward_zero(2, 2) == 1
        divide_toward_zero(1, 2) == 0
        divide_toward_zero(0, 2) == 0
        divide_toward_zero(-1, 2) == 0
        divide_toward_zero(-2, 2) == -1
    Args:
        x (int): The numerator of the division.
        y (int): The denominator of the division.
    Returns:
        The int result rounded towards 0.
    """
    quotient, remainder = divmod(x, y)
    # Python's floor division rounds toward -inf; bump the quotient up
    # by one when the operands have opposite signs and there is a
    # remainder, which turns floor rounding into truncation.
    if remainder and (x < 0) != (y < 0):
        quotient += 1
    return quotient
def comma_join(fields):
    """
    Join *fields* with commas, converting every element to its
    string form first.
    """
    return ",".join(str(field) for field in fields)
def sort_x_by_y(x, y):
    """Sort the iterable x by the order of iterable y"""
    # Sorting the (y, x) pairs orders primarily by y; ties in y fall
    # back to comparing the x values themselves, as before.
    ordered_pairs = sorted(zip(y, x))
    return [value for _, value in ordered_pairs]
import psycopg2
def _execute_psyco(command, **kwargs):
    """
    executes a postgres commandline through psycopg2
    :param command: A psql command line as a str
    :param kwargs: will be forwarded to psycopg2.connect
    :return: list of result rows when the command produced a result set,
        otherwise None
    """
    # Note: Ubuntu 18.04 uses "peer" as the default postgres configuration
    # which allows connections only when the unix user matches the database user.
    # This restriction no longer applies for IPv4/v6-based connection,
    # when specifying host=localhost.
    if kwargs.get('host') is None:
        kwargs['host'] = 'localhost'
    output = None
    with psycopg2.connect(**kwargs) as conn:
        # autocommit so DDL/admin statements run outside a transaction
        conn.autocommit = True
        with conn.cursor() as cursor:
            cursor.execute(command)
            # cursor.description is None for statements with no result set
            if cursor.description is not None:
                output = cursor.fetchall()
        # psycopg2's "with conn" only ends the transaction, it does not
        # close the connection — do that explicitly.
        # see http://initd.org/psycopg/docs/usage.html#with-statement
        conn.close()
    return output
def pair(s1, s2, track_id_pairs):
    """Returns pairs of tracks, i.e., tracks that can be compared.

    s1 -- all tracks of sensor 1.
    s2 -- all tracks of sensor 2.
    track_id_pairs -- ID pairs of tracks of s1 and s2.
    """
    pairs = []
    # collect available ids of the sensor's tracks
    for tid1, tid2 in track_id_pairs:
        try:
            pairs.append([s1[tid1], s2[tid2]])
        except (KeyError, IndexError):
            # tracks may vanish from view, ignore — the pair simply goes
            # unused. Narrowed from a blanket Exception so genuine bugs
            # (e.g. TypeError) are no longer silently swallowed.
            pass
    return pairs
import functools
def lsp_rpc(f):
    """A decorator for LanguageServerProtocol-methods.

    Calls made before the server is initialized or after shutdown are
    intercepted and answered with a JSON-RPC error object instead of
    running the wrapped method.  It expects the first parameter of the
    wrapped function to be the LanguageServerProtocol ``self``
    reference; apply it to all LSP-methods except initialize and exit.
    """
    @functools.wraps(f)
    def guarded(*args, **kwargs):
        if args:
            obj = args[0]
        else:
            obj = kwargs['self']
        if obj.shared.shutdown:
            return {'code': -32600, 'message': 'language server already shut down'}
        if not obj.shared.initialized:
            return {'code': -32002, 'message': 'language server not initialized'}
        return f(*args, **kwargs)
    return guarded
import json
from typing import OrderedDict
def validate_payload(payload):
    """Coerce *payload* into an OrderedDict, validating its type.

    A ``str`` payload is assumed to be JSON and is decoded with
    order-preserving keys.

    Args:
        payload (str or OrderedDict): Payload object
    Returns:
        OrderedDict: Original payload object as an OrderedDict.
    Raises:
        TypeError: when the (decoded) payload is not an OrderedDict.
    """
    if isinstance(payload, str):
        decoder = json.JSONDecoder(object_pairs_hook=OrderedDict)
        payload = decoder.decode(payload)
    if isinstance(payload, OrderedDict):
        return payload
    raise TypeError("Payload must be of type OrderedDict.")
def clean_cluster_seq_id(id):
    """Returns a cleaned cd-hit sequence id.

    Cluster-file ids look like ``>some_id...``; drop the leading '>'
    and the trailing three characters.
    """
    without_marker = id[1:]
    return without_marker[:-3]
def longVal(x):
    """ longVal(x):
    Return the integer value of a z3 constant as a python int.

    Uses ``as_long`` when present; otherwise, for a rational constant
    whose denominator is 1, returns its numerator. Anything else
    yields None."""
    if hasattr(x, 'as_long'):
        return x.as_long()
    if hasattr(x, 'numerator_as_long') and x.denominator_as_long() == 1:
        return x.numerator_as_long()
    return None
def bytes_to_str(bytes, base=2, precision=0):
    """Convert number of bytes to a human-readable format.

    Arguments:
        bytes -- number of bytes
        base -- base 2 'regular' multiplexer, or base 10 'storage' multiplexer
        precision -- number of decimal places to output
    Returns:
        Human-readable string such as '1.32M', or None for an
        unsupported base.
    """
    if base == 2:
        step = 1024
    elif base == 10:
        step = 1000
    else:
        return None  # unsupported base
    fmt = '%.' + str(precision) + 'f'
    for power, suffix in ((4, 'T'), (3, 'G'), (2, 'M'), (1, 'K')):
        unit = step ** power
        if bytes >= unit:
            text = (fmt % float(bytes / unit)) + suffix
            if base == 10:
                # storage-vendor style: lowercase prefix plus 'B'
                text = text[:-1] + suffix.lower() + 'B'
            return text
    # Below one kilobyte: plain byte count, no unit conversion.
    return (fmt % float(bytes)) + 'B'
import textwrap
def _wrap(content, indent_level):
"""wrap multiple lines keeping the indentation"""
indent = ' ' * indent_level
wrap_opt = {
'initial_indent': indent,
'subsequent_indent': indent,
}
lines = []
for paragraph in content.splitlines():
if not paragraph:
lines.append('')
continue
lines.extend(textwrap.wrap(paragraph, **wrap_opt))
return lines | 9defcc8b216fb8a15b6c796fbf5d5206510db117 | 687,251 |
def fwd_slash(file_path):
    """Ensure that all slashes are '/'.

    Args:
        file_path (str|Path): The path to force '/'
    Returns:
        (str): Formatted path
    """
    text = str(file_path)
    return "/".join(text.split("\\"))
def flatten_sub(sub, game_id):
    """Flatten one (sub_id, sub_data) substitution record into a flat dict."""
    sub_id, data = sub
    event_minute = data['t']['m'] + data['t']['s'] / 60
    return {
        'game_id': game_id,
        'sub_id': sub_id,
        'sub_type': data['type'],
        'time_of_event(min)': event_minute,
        'team_id': data['team'],
        'player_off': float(data['offId']),
        'player_on': float(data['inId']),
    }
def remove_by_idxs(ls, idxs):
    """Return a copy of *ls* without the elements at positions *idxs*."""
    excluded = set(idxs)
    return [item for position, item in enumerate(ls) if position not in excluded]
def gcd(a, b):
    """ Find GCD(a, b) with Euclid's algorithm: GCD(a, b) = GCD(b, a mod b),
    and GCD(a, 0) is a."""
    while b:
        a, b = b, a % b
    return a
def _max_factor(n, factor, max_size):
""" Return the largest factor within the provided max;
e.g., the most images of size n thet can fit in max_size
"""
if max_size is None or n * factor <= max_size:
return factor
return max_size // n | f55e68ecba5ea3f47b7c644ab8e81182fcec6ccf | 687,263 |
from typing import Tuple
import math
def get_new_coordinates(curr_x: float, curr_y: float, angle: float, speed: float) -> Tuple[float, float]:
    """
    Advance a point by *speed* along a heading of *angle* radians
    (measured from the x axis).

    :param curr_x: current x coordinate
    :param curr_y: current y coordinate
    :param angle: heading in radians from the x axis
    :param speed: distance of travel
    :return: the new (x, y) coordinate
    """
    dx = speed * math.cos(angle)
    dy = speed * math.sin(angle)
    return curr_x + dx, curr_y + dy
def move(cm, from_start, from_end, insert_pos):
    """
    Move rows from_start - from_end to insert_pos in-place.

    ``cm`` must support simultaneous fancy indexing on rows and columns
    (a NumPy array, as in the example); the same permutation is applied
    to both axes so row/column labels stay consistent.

    Examples
    --------
    >>> cm = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 0, 1], [2, 3, 4, 5]])
    >>> move(cm, 1, 2, 0)
    array([[5, 6, 4, 7],
           [9, 0, 8, 1],
           [1, 2, 0, 3],
           [3, 4, 2, 5]])
    """
    # The insertion point may not fall inside the span being moved.
    assert insert_pos < from_start or insert_pos > from_end
    if insert_pos > from_end:
        # Moving the span right: the rows it jumps over come first,
        # then the span itself.
        p_new = (list(range(from_end + 1, insert_pos + 1)) +
                 list(range(from_start, from_end + 1)))
    else:
        # Moving the span left: the span comes first, then the rows
        # between the insertion point and the span's old position.
        p_new = (list(range(from_start, from_end + 1)) +
                 list(range(insert_pos, from_start)))
    # print(p_new)
    p_old = sorted(p_new)
    # swap columns
    cm[:, p_old] = cm[:, p_new]
    # swap rows
    cm[p_old, :] = cm[p_new, :]
    return cm
def evaluate_training_result(env, agent):
    """
    Evaluates the performance of the current DQN agent by using it to play a
    few episodes of the game and then calculates the average reward it gets.
    The higher the average reward is the better the DQN agent performs.

    NOTE(review): ``agent`` is not referenced in the body — the policy is
    presumably wired into ``env.run`` already; confirm against the caller.

    :param env: the game environment
    :param agent: the DQN agent
    :return: average reward across episodes
    """
    total_reward = 0.0
    episodes_to_play = 100
    for i in range(episodes_to_play):
        trajectories, _ = env.run(is_training=True)
        # calculate reward: sum the reward field (index 2) of every
        # transition tuple in the first player's trajectory
        episode_reward = 0.0
        for ts in trajectories[0]:
            # print(
            #     'State: {}, Action: {}, Reward: {}, Next State: {}, Done: {}'.
            #     format(ts[0], ts[1], ts[2], ts[3], ts[4]))
            episode_reward += ts[2]
        total_reward += episode_reward
    average_reward = total_reward / episodes_to_play
    return average_reward
import re
def checkBadFormat(pattern, id):
    """
    Return False if *id* matches *pattern* (anchored at the start),
    True if it does not, and None when *id* is NaN.

    The ``id != id`` test is the standard NaN check (NaN is the only
    value unequal to itself); the previous version expressed this as a
    fall-through that implicitly returned None — made explicit here.
    """
    if id != id:
        return None
    return not re.match(pattern, id)
def always_https(url):
    """ ensures that urls are always using HTTPS
    :param url: An URL (as string)
    :type url: str
    :return: The passed in URL with HTTPS
    :rtrype: str
    """
    # Leave non-http schemes and already-secure URLs untouched.
    if url.startswith('https') or not url.startswith('http'):
        return url
    return url.replace('http://', 'https://')
def get_default_living(rows, cols):
    """Default number of initial living cells: a quarter of the grid,
    rounded to the nearest integer."""
    cell_count = rows * cols
    return round(cell_count / 4)
def convert_temperature(val, old_scale="fahrenheit", new_scale="celsius"):
    """
    Convert from a temperature scale to another one among Celsius, Kelvin
    and Fahrenheit.
    Parameters
    ----------
    val: float or int
        Value of the temperature to be converted expressed in the original
        scale.
    old_scale: str
        Original scale from which the temperature value will be converted.
        Supported scales are Celsius ['Celsius', 'celsius', 'c'],
        Kelvin ['Kelvin', 'kelvin', 'k'] or Fahrenheit ['Fahrenheit',
        'fahrenheit', 'f'].
    new_scale: str
        New scale to which the temperature value will be converted.
        Same supported scales as ``old_scale``.
    Raises
    -------
    AttributeError if either of the scales are not one of the supported
    ones.
    Returns
    -------
    res: float
        Value of the converted temperature expressed in the new scale.
    """
    # Kelvin is the pivot scale: convert old_scale -> K, then K -> new_scale.
    to_kelvin = {
        'celsius': lambda v: v + 273.15,
        'c': lambda v: v + 273.15,
        'kelvin': lambda v: v,
        'k': lambda v: v,
        'fahrenheit': lambda v: 5.0 * (v - 32) / 9.0 + 273.15,
        'f': lambda v: 5.0 * (v - 32) / 9.0 + 273.15,
    }
    from_kelvin = {
        'celsius': lambda v: v - 273.15,
        'c': lambda v: v - 273.15,
        'kelvin': lambda v: v,
        'k': lambda v: v,
        'fahrenheit': lambda v: (v - 273.15) * 9.0 / 5.0 + 32,
        'f': lambda v: (v - 273.15) * 9.0 / 5.0 + 32,
    }
    if old_scale.lower() not in to_kelvin:
        raise AttributeError(
            f'{old_scale} is unsupported. Celsius, Kelvin and Fahrenheit are supported')
    temp = to_kelvin[old_scale.lower()](val)
    if new_scale.lower() not in from_kelvin:
        raise AttributeError(
            f'{new_scale} is unsupported. Celsius, Kelvin and Fahrenheit are supported')
    return from_kelvin[new_scale.lower()](temp)
def percent(num, div, prec=2):
    """
    Returns the percentage of num/div as float.

    Args:
        num (int): numerator
        div (int): divisor
        prec (None, int): rounding precision
    Returns:
        p (float): 100 * num / div, or 0.0 when div is zero
    """
    numerator = float(num)
    denominator = float(div)
    if denominator == 0:
        # Avoid ZeroDivisionError; report 0% instead.
        return 0.0
    return round(100 * numerator / denominator, prec)
def slurp(path):
    """Returns the contents of the file at the given path.

    Rewritten with a ``with`` statement, which closes the handle on all
    paths (including exceptions) — the previous manual try/finally also
    did, but this is the idiomatic and shorter form.
    """
    with open(path) as f:
        return f.read()
def get_paralogs_data(paral_file):
    """Extract paralogous projections.

    Reads one projection ID per line from *paral_file* and returns them
    as a set of stripped strings; returns an empty set when no file is
    given.  (The previous version initialised a throwaway list that was
    immediately shadowed by the set.)
    """
    if paral_file is None:
        return set()
    with open(paral_file, "r") as f:
        return set(line.rstrip() for line in f)
import json
import requests
def get_onboard_certificates(clearpass_fqdn, access_token, username):
    """Get all valid certificates (not revoked or expired) for user.

    Queries the ClearPass REST API and returns a list of
    (certificate id, CA id) tuples.  On any request or parse failure
    the error is printed and the process exits with status 1
    (NOTE(review): consider raising instead of exiting from library code).
    """
    url = "https://{}/api/certificate".format(clearpass_fqdn)
    # Server-side filter: only this user's still-valid certificates.
    queryfilter = {'mdps_user_name': username, 'is_valid':'true'}
    payload = {'filter':json.dumps(queryfilter),'calculate_count':'true'}
    headers = {'Authorization':'Bearer {}'.format(access_token), "Content-Type": "application/json"}
    try:
        r = requests.get(url, headers=headers, params=payload)
        r.raise_for_status()
        json_response = r.json()
    except Exception as e:
        print(e)
        exit(1)
    # HAL-style response: items live under '_embedded'.
    certs = [(i['id'], i['ca_id']) for i in json_response['_embedded']['items']]
    return certs
import time
def generate_stats_table(stats: dict) -> str:
    """Build a markdown table of question counts per category.

    Args:
        stats: mapping of category key to {"title": str, "cnt": int}.

    Returns:
        Markdown string: a header with the total count and today's UTC
        date, followed by a |Category|count| table with one linked row
        per category.
    """
    cnt_total = sum(v['cnt'] for v in stats.values())
    today = time.strftime('%Y-%m-%d', time.gmtime())
    header = f"""## Questions categories
*Total number of questions as of {today}*: **{cnt_total}**
"""
    rows = [f"|[{v['title']}](questions/{k}/)|{v['cnt']}|"
            for k, v in stats.items()]
    table_body = "\n".join(rows)
    return f"""{header}\n
|Category|Number of questions|
|:-:|-:|
{table_body}"""
def connex(data,K,L):
    """
    Return the list of members of the connex component containing the elements
    originaly in L.
    Parameters:
    -----------
    data: pandas dataframe
        data must contain at least one column named neighbors_list,
        each cell holding a list of neighboring row indices.
    K: list
        The list of members of the connex component so far.
        NOTE: mutated in place (``K += L``) at every recursion level.
    L: list
        The starting element, then the list of members of the connex component
        reached so far minus the ones that are in K.
    Returns:
    --------
    K: list
        The junctions in the connex component containing the elements
        originaly in L.
    """
    # Base case: the frontier is empty, the component is complete.
    if L == []:
        K.sort()
        return K
    M = []
    # Collect all neighbours of the current frontier.
    for elem in L:
        M += data.at[elem,'neighbors_list']
    N = []
    # Keep only neighbours not already in the component.
    for elem2 in M:
        if not (elem2 in K):
            N += [elem2]
    N = list(set(N))
    L = N
    K += L
    # Recurse with the new frontier until no new element is reached.
    return connex(data,K,L)
def get_variant_id(variant_dict):
    """Build a variant id.

    The variant id is a string made of CHROM_POS_REF_ALT.  Symbols that
    appear in structural-variant ALT fields ('<', '>', '[', ']', ':')
    are stripped from ALT so the id works downstream.

    Args:
        variant_dict (dict): A variant dictionary
    Returns:
        variant_id (str)
    """
    bad_chars = set("<>[]:")
    alt = ''.join(ch for ch in variant_dict['ALT'] if ch not in bad_chars)
    parts = [variant_dict['CHROM'], variant_dict['POS'],
             variant_dict['REF'], alt]
    return '_'.join(parts)
import random
def mcpi_samples(n):
    """
    Count how many of n uniformly random points in the unit square fall
    inside the unit circle (Monte-Carlo pi sampling).
    """
    inside = 0
    for _ in range(n):
        px = random.random()
        py = random.random()
        if px * px + py * py <= 1:
            inside += 1
    return inside
def batchify(batch):
    """Gather a batch of individual examples into one batch.

    The optional 'answer_tokens' and 'doc_truth' fields are collected
    only when present in the first example; otherwise None is returned
    in their slot.
    """
    def column(key):
        return [example[key] for example in batch]

    answer_tokens = column('answer_tokens') if 'answer_tokens' in batch[0] else None
    docs_truth = column('doc_truth') if 'doc_truth' in batch[0] else None
    return (column('question'), column('question_tokens'),
            column('answer'), answer_tokens, docs_truth)
def lv_unpack(txt):
    """
    Deserializes a string of the length:value format.

    :param txt: The input string, e.g. "2:ab3:cde"
    :return: a list of the decoded values
    """
    remaining = txt.strip()
    values = []
    while remaining:
        length, rest = remaining.split(":", 1)
        size = int(length)
        values.append(rest[:size])
        remaining = rest[size:]
    return values
def tmp_data_directory(tmp_path_factory):
    """Create a temporary "getdera" directory and return its path as str.

    Intended as a pytest fixture helper taking the ``tmp_path_factory``
    factory object.
    """
    created = tmp_path_factory.mktemp("getdera")
    return str(created)
def _Mabove(dat, surf_th):
    """Return total gas mass above threshold density surf_th.

    Assumes ``dat`` is a snapshot exposing an xarray-like ``surf``
    field and a ``domain['dx']`` cell-size sequence — TODO confirm
    against the caller's data type.
    """
    surf = dat.surf
    # Sum only cells above the threshold, scaled by the cell area dx*dy.
    M = surf.where(surf>surf_th).sum()*dat.domain['dx'][0]*dat.domain['dx'][1]
    # .values[()] extracts the Python scalar from the 0-d result array.
    return M.values[()]
import string
import operator
def getFisrtCharThatAppearsOnce(myString):
    """ Return the first char that appears exactly once in the provided
    string (case-insensitive, whitespace removed), or False when there
    is none.  Only lowercase ascii letters are tracked; other characters
    raise KeyError, as before. """
    squeezed = "".join(myString.lower().split())
    # letter -> [occurrence count, last seen position]
    stats = {letter: [0, 0] for letter in string.ascii_lowercase}
    for position, char in enumerate(squeezed):
        stats[char][0] += 1
        stats[char][1] = position
    singles = [(counts, letter)
               for letter, counts in stats.items() if counts[0] == 1]
    if not singles:
        return False
    # Among the unique letters, pick the one seen earliest.
    return min(singles)[1]
def _strip_quotes(str_q):
"""
Helper function to strip off the ' or " off of a string
"""
if str_q[0] == str_q[-1] and str_q.startswith(("'", '"')):
return str_q[1:-1]
return str_q | df1b3eda68e913e2fa6ec30a0f59f8d7c9ef6fa7 | 687,302 |
def inverse2d(a):
    """
    Returns the matrix inverse of 2x2 matrix "a".
    Entries are rounded to 6 decimal places before the sign/position
    adjustments, matching the original behaviour.
    :param a: The original 2x2 matrix.
    :return: Its matrix inverse.
    """
    det = a[0][0] * a[1][1] - a[0][1] * a[1][0]
    scale = 1 / det
    # Adjugate layout: diagonal entries swap, off-diagonal entries negate.
    return [
        [round(a[1][1] * scale, 6), round(a[0][1] * scale, 6) * -1],
        [round(a[1][0] * scale, 6) * -1, round(a[0][0] * scale, 6)],
    ]
def number2binary(v, dynamic_padding=False, padded_length=None):
    """ Convert an integer value to the equivalent string of 1 and 0 characters.

    With dynamic_padding, the width is the smallest power of two >= 4
    that fits the digits; otherwise padded_length is used (no padding
    when it is None).
    """
    bits = ""
    while v:
        bits = ("1" if v & 1 else "0") + bits
        v >>= 1
    if dynamic_padding:
        width = 4
        while width < len(bits):
            width <<= 1
    elif padded_length is None:
        width = len(bits)
    else:
        width = padded_length
    return bits.rjust(width, "0")
def propset_dict(propset):
    """Turn a propset list into a dictionary.

    PropSet is an optional attribute on ObjectContent objects
    that are returned by the VMware API.
    You can read more about these at:
    | http://pubs.vmware.com/vsphere-51/index.jsp
    | #com.vmware.wssdk.apiref.doc/
    | vmodl.query.PropertyCollector.ObjectContent.html
    :param propset: a property "set" from ObjectContent
    :return: dictionary representing property set (empty for None)
    """
    if propset is None:
        return {}
    result = {}
    for prop in propset:
        result[prop.name] = prop.val
    return result
def strip_and_split(line):
    """
    Strip *line*, split it on whitespace, and strip every token.

    :param line: input string
    :return: list of stripped tokens
    """
    tokens = line.strip().split()
    return [token.strip() for token in tokens]
import re
def first_upper(s):
    """Capitalizes the first letter, leaves everything else alone"""
    found = re.search(r'[a-zA-Z]', s)
    if found is None:
        return s
    i = found.start()
    return s[:i] + s[i].upper() + s[i + 1:]
def get_repo_information(repo_obj):
    """
    Using the current repository, we obtain information on the repository, which includes
    - owner_name
    - repo_name
    Requires for there to be a remote named "origin" in the current local repo clone
    Returns a dict with said values. If it cannot find both, returns an empty dict
    """
    url = repo_obj.remotes["origin"].url
    colon = url.find(":")
    last_dot = url.rfind(".")
    if colon == -1 or last_dot == -1:
        return {}
    # Keep the "<owner>/<repo>" slice between ':' and the final '.'.
    parts = url[colon + 1:last_dot].split("/")
    return {"owner_name": parts[0], "repo_name": parts[1]}
import itertools
def to_combinations(list_of_objs: list):
    """
    Build all combinations of *list_of_objs* with sizes from 2 up to
    len(list_of_objs) - 1 (pairs included; the single full-length
    combination is not).
    :param list_of_objs: source list
    :return: Combinations (list of tuples)
    """
    result = []
    for size in range(2, len(list_of_objs)):
        result += list(itertools.combinations(list_of_objs, size))
    return result
def fmt(x, pos):
    """
    Format *x* in LaTeX scientific notation ('a \\times 10^{b}') for
    nicer plot tick labels.  *pos* is unused; it is part of the
    matplotlib ticker-callback signature.
    """
    mantissa, exponent = '{:.2e}'.format(x).split('e')
    power = int(exponent)
    return r'${} \times 10^{{{}}}$'.format(mantissa, power)
def is_binary(plist_path):
    """Checks if a plist is a binary or not (heuristic: contains a NUL byte)."""
    with open(plist_path, 'rb') as handle:
        return any(b'\0' in block for block in handle)
def validate_ppoi_tuple(value):
    """
    Validates that a tuple (`value`)...
    ...has a len of exactly 2
    ...both values are floats/ints that are greater-than-or-equal-to 0
    AND less-than-or-equal-to 1

    Returns:
        bool: True when the tuple is a valid PPOI, False otherwise.

    BUG FIX: the previous implementation wrapped the checks in a
    ``while valid is True`` loop that never terminated for a valid
    input, so any well-formed tuple caused an infinite loop.
    """
    if not (isinstance(value, tuple) and len(value) == 2):
        return False
    return all(0 <= coord <= 1 for coord in value)
def dec2dec(dec):
    """
    Convert a sexagesimal declination string into a float in degrees.

    Parameters
    ----------
    dec : str
        A string representing the Dec.
        Expected format is `[+- ]dd:mm[:ss.s]`.
        Colons can be replaced with any white space character.

    Returns
    -------
    dec : float
        The Dec in degrees.
    """
    fields = dec.replace(':', ' ').split()
    if len(fields) == 2:
        fields.append('0.0')
    magnitude = (abs(float(fields[0]))
                 + float(fields[1]) / 60.0
                 + float(fields[2]) / 3600.0)
    # A leading '-' must be honoured even for '-00' degrees, where the
    # numeric value alone would not be negative.
    negative = fields[0].startswith('-') or float(fields[0]) < 0
    return -magnitude if negative else magnitude
import torch
def cosine_similarity(x1, x2):
    """Calculates cosine similarity of two tensors along the last dimension."""
    inner = (x1 * x2).sum(dim=-1)
    norms = torch.linalg.norm(x1, dim=-1) * torch.linalg.norm(x2, dim=-1)
    return inner / norms
from typing import List
def generate_board(
    row: int,
    column: int,
) -> List[List[str]]:
    """
    Generate a new empty game board of row * column playable cells.

    The first list is the column header (a blank corner cell followed
    by letters 'A', 'B', ...); each following list starts with its
    1-based row number followed by blank cells.

    Parameters
    ----------
    row: int
        A number that indicate how many row should be generated.
    column: int
        A numebr that indicated how many column should be generated.

    Returns
    -------
    board: List[List[str]]
        2D array containing all the game detail, including column header,
        row header and placed buildings.
    """
    letters = [chr(ord('A') + offset) for offset in range(column)]
    board = [[' '] + letters]
    for row_number in range(1, row + 1):
        board.append([str(row_number)] + [' '] * column)
    return board
def cpd_int(p, r, t, n=12):
    """
    Calculate compound interest: a = p * (1 + r/n)^(n*t).
    :param p: (float) - principal
    :param r: (float) - annual interest rate
    :param t: (int) - time(years)
    :param n: (int) - number of times interest is compounded each year
    :return a: (float) - compound interest
    """
    growth_per_period = 1 + r / n
    total_periods = n * t
    return p * growth_per_period ** total_periods
import re
def clean_newline(line):
    """Cleans string so formatting does not cross lines when joined with \\n.
    Just looks for unpaired '`' characters, other formatting characters do not
    seem to be joined across newlines.
    For reference, discord uses:
    https://github.com/Khan/simple-markdown/blob/master/simple-markdown.js
    """
    # Find the LAST properly balanced backtick span in the line; the
    # regex matches a run of backticks, non-empty content, and an equal
    # closing run not followed by another backtick.
    match = None
    for match1 in re.finditer(r'(`+)\s*([\s\S]*?[^`])\s*\1(?!`)', line):
        match = match1
    # Everything after the last balanced span may contain unpaired
    # backticks; escape them so they cannot open a code block that
    # would span across joined lines.
    idx = match.end() if match else 0
    line = line[:idx] + line[idx:].replace('`', '\`')
    return line
def lu_decomposition(matrix_in, q=0):
    """ LU-Factorization method using Doolittle's Method for solution of linear systems.
    Decomposes the matrix :math:`A` such that :math:`A = LU`.
    The input matrix is represented by a list or a tuple. If the input matrix is 1-dimensional, i.e. a list or tuple of
    integers and/or floats, then the second function argument ``q`` must be bigger than zero. If the input matrix is
    2-dimensional, i.e. list of lists of integers and/or floats, then there is no need to input ``q`` as it will be
    automatically computed.
    :param matrix_in: Input matrix (must be a square matrix)
    :type matrix_in: list, tuple
    :param q: matrix size (not used if the input matrix is 2-dimensional)
    :type q: int
    :return: a tuple containing matrices (L,U)
    :rtype: tuple
    """
    if not isinstance(q, int):
        raise TypeError("Matrix size must be an integer")
    if q < 0:
        raise ValueError("Matrix size should be bigger than zero")
    # Flag for converting return values into 1-dimensional list
    convert_res = False
    if q > 0:
        # Check if the 1-dimensional input matrix is a square matrix
        if len(matrix_in) != q ** 2:
            raise ValueError("The input matrix must be a square matrix")
        # Convert 1-dimensional matrix to 2-dimensional (row-major order)
        matrix_a = [[0.0 for _ in range(q)] for _ in range(q)]
        for i in range(0, q):
            for j in range(0, q):
                matrix_a[i][j] = matrix_in[j + (q * i)]
        # The input is 1-dimensional, so the return values should be
        # flattened back to 1-dimensional lists as well (see bottom).
        convert_res = True
    else:
        matrix_a = matrix_in
        # Check if the 2-dimensional input matrix is a square matrix
        q = len(matrix_a)
        for idx, m_a in enumerate(matrix_a):
            if len(m_a) != q:
                raise ValueError("The input must be a square matrix. " +
                                 "Row " + str(idx + 1) + " has a size of " + str(len(m_a)) + ".")
    # Initialize L and U matrices
    matrix_u = [[0.0 for _ in range(q)] for _ in range(q)]
    matrix_l = [[0.0 for _ in range(q)] for _ in range(q)]
    # Doolittle Method
    for i in range(0, q):
        for k in range(i, q):
            # Upper triangular (U) matrix
            matrix_u[i][k] = float(matrix_a[i][k] - sum([matrix_l[i][j] * matrix_u[j][k] for j in range(0, i)]))
            # Lower triangular (L) matrix
            if i == k:
                # Doolittle convention: L has a unit diagonal.
                matrix_l[i][i] = 1.0
            else:
                matrix_l[k][i] = float(matrix_a[k][i] - sum([matrix_l[k][j] * matrix_u[j][i] for j in range(0, i)]))
                # NOTE(review): no pivoting — a zero pivot U[i][i] raises
                # ZeroDivisionError for singular/ill-ordered matrices.
                matrix_l[k][i] /= float(matrix_u[i][i])
    # Prepare and return the L and U matrices
    if convert_res:
        m_u = []
        m_l = []
        for upper, lower in zip(matrix_u, matrix_l):
            m_u.extend(upper)
            m_l.extend(lower)
        return m_l, m_u
    return matrix_l, matrix_u | 452668c88cc39bed3c804bdcdec6650412d5355f | 687,352
def count_user_type(data_list):
    """
    Count the user types present in the records of a list.

    Arguments:
        data_list: List of records containing the user type in one of the columns.

    Returns:
        Number of 'Subscriber' and 'Customer' users, in this order.
    """
    subscriber = 0
    customer = 0
    for sample in data_list:
        # The user type lives in the third-from-last column of each record.
        if sample[-3] == 'Subscriber':
            subscriber += 1
        elif sample[-3] == 'Customer':
            customer += 1
    return [subscriber, customer] | cf464f8f276e337a0254988a58b7caa9b70083cb | 687,353
def index_api_data(parsed_json, id_field):
    """Transform a list of dicts into a dict indexed by one of their fields.
    >>> index_api_data([{'id': 'eggs', 'val1': 42, 'foo': True},
    ...                 {'id': 'spam', 'val1': 1, 'foo': True}], 'id')
    {'eggs': {'val1': 42, 'foo': True}, 'spam': {'val1': 1, 'foo': True}}
    >>> index_api_data([{'id': 'eggs', 'val1': 42, 'foo': True},
    ...                 {'id': 'spam', 'val1': 1, 'foo': True}], 'val1')
    {1: {'foo': True, 'id': 'spam'}, 42: {'foo': True, 'id': 'eggs'}}

    Raises RuntimeError if any record lacks ``id_field`` or if two records
    share the same id value.
    """
    transformed_dict = {}
    for attr in parsed_json:
        # Validate the indexing field before using it as a key.
        if not id_field in attr:
            raise RuntimeError("Field '{}' not found in json object".format(
                id_field))
        id_val = attr[id_field]
        if id_val in transformed_dict:
            raise RuntimeError("Identifier '{}' found more than once in json "
                               "object".format(id_val))
        # make a copy of the sub-dictionary without the id field
        attr_dict = dict(attr)
        del attr_dict[id_field]
        transformed_dict[id_val] = attr_dict
    return transformed_dict | fbee9dbfdf3f75c2a1a0f2e2b8b71be0bb5569fa | 687,354
def client_factory(client_cls, dispatcher, settings):
    """Shared logic to instantiate a configured torque client utility."""
    # ``.get`` means a missing setting yields None instead of raising;
    # the client class is expected to cope with unset url / api key.
    torque_url = settings.get('torque.url')
    torque_api_key = settings.get('torque.api_key')
    return client_cls(dispatcher, torque_url, torque_api_key) | c174a2a65bd8146c0f52db07b43d4a80eb37e04e | 687,356
from typing import Tuple
import torch
from typing import List
def encode_supervisions(
    supervisions: dict, subsampling_factor: int
) -> Tuple[torch.Tensor, List[str]]:
    """
    Encodes Lhotse's ``batch["supervisions"]`` dict into a pair of torch Tensor,
    and a list of transcription strings.
    The supervision tensor has shape ``(batch_size, 3)``.
    Its second dimension contains information about sequence index [0],
    start frames [1] and num frames [2].
    The batch items might become re-ordered during this operation -- the
    returned tensor and list of strings are guaranteed to be consistent with
    each other.
    """
    # Frame counts are divided by the model's subsampling factor so they
    # refer to output frames rather than input frames.
    supervision_segments = torch.stack(
        (
            supervisions["sequence_idx"],
            supervisions["start_frame"] // subsampling_factor,
            supervisions["num_frames"] // subsampling_factor,
        ),
        1,
    ).to(torch.int32)
    # Sort segments by duration, longest first; presumably required by the
    # downstream decoder -- TODO confirm against the caller.
    indices = torch.argsort(supervision_segments[:, 2], descending=True)
    supervision_segments = supervision_segments[indices]
    # Re-order the transcripts identically so they stay aligned.
    texts = supervisions["text"]
    texts = [texts[idx] for idx in indices]
    return supervision_segments, texts | 8cd6a0ef6fa5027af454e804b82dacfe6f44be12 | 687,357
from bs4 import BeautifulSoup
def fetch_links_from_html(html_doc):
    """
    Given a blob of HTML, this function returns a list of PDF links

    :param html_doc: HTML document as a string (or bytes)
    :return: list of absolute URLs; relative hrefs are resolved against
        https://www.gov.uk

    NOTE(review): despite the name, every anchor href is collected, not
    only ``.pdf`` links -- confirm whether callers rely on that.
    """
    # Name the parser explicitly: without it bs4 picks whichever parser is
    # installed, which can change parse results between environments.
    soup = BeautifulSoup(html_doc, "html.parser")
    pdf_attachments = []
    for link in soup.findAll('a'):
        value = link.get('href')
        if value is None:
            # Bug fix: anchors without an href (e.g. named anchors) used to
            # crash with ``TypeError: argument of type 'NoneType'``.
            continue
        if "http" in value:
            pdf_url = value
        else:
            pdf_url = "https://www.gov.uk" + value
        pdf_attachments.append(pdf_url)
    return pdf_attachments | b2b8c2b4102637a5b011988d6d63f8f74ba3eca0 | 687,358
import torch
def depth_map_to_3d_torch(depth, cam_K, cam_W):
    """Derive 3D locations of each pixel of a depth map.
    Args:
        depth (torch.FloatTensor): tensor of size B x 1 x N x M
            with depth at every pixel
        cam_K (torch.FloatTensor): tensor of size B x 3 x 4 representing
            camera matrices
        cam_W (torch.FloatTensor): tensor of size B x 3 x 4 representing
            world matrices
    Returns:
        loc3d (torch.FloatTensor): tensor of size B x 3 x N x M
            representing color at given 3d locations
        mask (torch.FloatTensor): tensor of size B x 1 x N x M with
            a binary mask if the given pixel is present or not
    NOTE(review): the shapes above disagree with the code -- inputs are
    numpy arrays (converted via ``torch.from_numpy``), ``depth`` is a
    single (N, M) map (no batch dim), and ``loc3d`` is returned flattened
    to (3, N*M). Confirm against callers before relying on the docstring.
    """
    depth = torch.from_numpy(depth)
    cam_K = torch.from_numpy(cam_K)
    cam_W = torch.from_numpy(cam_W)
    N, M = depth.size()
    device = depth.device
    # Turn depth around. This also avoids problems with inplace operations
    depth = -depth.permute(1,0)
    zero_one_row = torch.tensor([[0., 0., 0., 1.]])
    zero_one_row = zero_one_row.expand(1, 4).to(device)
    # add row to world mat so it becomes an invertible 4x4 homogeneous matrix
    cam_W = torch.cat((cam_W, zero_one_row), dim=0)
    # clean depth image for mask: infinite depth marks missing pixels
    # upperlimit = 1.e+10
    upperlimit = float("Inf")
    mask = (depth.abs() != upperlimit).float()
    depth[depth == upperlimit] = 0
    depth[depth == -1*upperlimit] = 0
    # 4d array to 2d array k=N*M
    d = depth.reshape(1,N * M)
    # create pixel location tensor
    px, py = torch.meshgrid([torch.arange(0, N), torch.arange(0, M)])
    px, py = px.to(device), py.to(device)
    p = torch.cat((
        px.expand(px.size(0), px.size(1)),
        (M - py).expand(py.size(0), py.size(1))
    ), dim=0)
    p = p.reshape(2, py.size(0) * py.size(1))
    p = (p.float() / M * 2)
    # create terms of mapping equation x = P^-1 * d*(qp - b)
    P = cam_K[:2, :2].float().to(device)
    q = cam_K[2:3, 2:3].float().to(device)
    b = cam_K[:2, 2:3].expand(2, d.size(1)).to(device)
    Inv_P = torch.inverse(P).to(device)
    rightside = (p.float() * q.float() - b.float()) * d.float()
    x_xy = torch.matmul(Inv_P, rightside)
    # add depth and ones to location in world coord system
    x_world = torch.cat((x_xy, d, torch.ones_like(d)), dim=0)
    # derive location in object coord via loc3d = W^-1 * x_world
    Inv_W = torch.inverse(cam_W)
    Inv_W_exp = Inv_W.expand(4, 4)
    loc3d = torch.matmul(Inv_W_exp, x_world.double())
    loc3d = loc3d.reshape(4, N, M)
    loc3d = loc3d[:3,:,:].to(device)
    mask = mask.to(device)
    loc3d = loc3d.view(3, N * M)
    return loc3d, mask | adfa95abb2cf5be5ccf499f8231743a1416c9c50 | 687,367
from typing import Iterable
def flatten_iterable(its: Iterable, deep: bool = False) -> list:
    """
    flatten instance of Iterable to list
    Notes:
        1. except of str, won't flatten 'abc' to 'a', 'b', 'c'
    demo: [[[1], [2], [3]], 4]
        if deep is True: flatten to [1, 2, 3, 4]
        if deep is False, flatten to [[1], [2], [3], 4].
    """
    res = []
    for it in its:
        # Strings are iterable but must be treated as atoms, so check
        # for them before the generic Iterable branch.
        if isinstance(it, str):
            res.append(it)
        elif isinstance(it, Iterable):
            if deep:
                # Recurse to flatten arbitrarily nested iterables.
                res += flatten_iterable(it, True)
            else:
                # Shallow: splice in the element's items one level only.
                res.extend(it)
        else:
            res.append(it)
    return res | 4c17d75a7dde4d0b9002dedea2938deacfef813f | 687,371
def find_frequency_bandwidth(frequency, simulation_parameters):
    """
    Finds the correct bandwidth for a specific frequency from the
    simulation parameters.

    Parameters
    ----------
    frequency :
        Frequency whose channel bandwidth should be looked up.
    simulation_parameters : dict
        Must contain a ``channel_bandwidth_<frequency>`` key.

    Returns
    -------
    bandwidth
        The configured channel bandwidth for the frequency.

    Raises
    ------
    KeyError
        If no bandwidth is specified for the given frequency.
    """
    simulation_parameter = 'channel_bandwidth_{}'.format(frequency)
    if simulation_parameter not in simulation_parameters.keys():
        # Bug fix: the original constructed KeyError but never raised it,
        # so execution fell through to the raw lookup below and crashed
        # with a less helpful message.
        raise KeyError('{} not specified in simulation_parameters'.format(frequency))
    bandwidth = simulation_parameters[simulation_parameter]
    return bandwidth | f511ca981cba86a4ca3b741fc492bf6b36ef26b5 | 687,374
def isLoopClockwise(loop):
    """Gets if a loop of line segments is clockwise
    Parameters
    ----------
    loop : List or np array of shape (-1, 2, 2)
        -1 number of line segments, [startPoint, endPoint], [x,y]
    Returns
    -------
    bool
    Note
    -------
    https://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order
    """
    # Shoelace-style sum of (x2-x1)*(y2+y1) over all edges; a positive
    # total indicates clockwise orientation (in the linked convention,
    # with y increasing downward -- verify for this project's axes).
    s = 0
    for i in loop:
        s += (i[1][0] - i[0][0]) * (i[1][1] + i[0][1])
    return s > 0 | 1db58340ac6a8d0e958344148290d6762f876947 | 687,380
def getEnglishText(prop):
    """Gets the English text for the given property and returns a string.
    Must be parent node that contains "gco:CharacterString" as a direct child.
    Args:
        prop: Nodelist object to retrieve text from.
    Returns:
        String of English text (or empty if none exists).
    """
    try:
        characterString = prop.item(0).getElementsByTagName("gco:CharacterString")
        # Only return text when the element exists and has a child node.
        if characterString.length > 0 and characterString.item(0).hasChildNodes():
            return characterString.item(0).firstChild.data
        else:
            return ""
    except:
        # Deliberate best-effort: any missing/malformed node (e.g. empty
        # NodeList, AttributeError on None) degrades to an empty string.
        return "" | 7545f78633e8d1b68c26f25e30253023d897a3d1 | 687,384
def is_smile_inside_face(smile_coords, face_coords):
    """Function to check if the smile detected is inside a face or not
    Args:
        smile_coords (list): list of smaile coordinates of form [x, y, (x+w), (y+h)]
        face_coords (list): list of face coordinates of form [x, y, (x+w), (y+h)]
    Returns:
        bool: True if smile is inside of face bounding box, else False

    Note: comparisons are strict, so a smile box that exactly touches the
    face box edge is treated as outside.
    """
    sx1, sy1, sx2, sy2 = smile_coords
    fx1, fy1, fx2, fy2 = face_coords
    # If top-left plate corner is inside the face
    if fx1 < sx1 and fy1 < sy1:
        # If bottom-right plate corner is inside the face
        if sx2 < fx2 and sy2 < fy2:
            # The entire smile is inside the face.
            return True
        else:
            # Some part of the smile is outside the face.
            return False
    else:
        # whole smile is outside the face.
        return False | 7a8d1a03ec3eed2743b6c70ed6df6a871a8b1276 | 687,386
def get_max_drawdown_from_series(r):
    """Risk Analysis from asset value
    cumprod way
    Parameters
    ----------
    r : pandas.Series
        daily return series
    Returns
    -------
    float
        Maximum drawdown (most negative peak-to-trough ratio of the
        compounded wealth curve ``(1 + r).cumprod()``).
    """
    # mdd = ((r.cumsum() - r.cumsum().cummax()) / (1 + r.cumsum().cummax())).min()
    # Drawdown at each point = (wealth - running peak) / running peak.
    mdd = (((1 + r).cumprod() - (1 + r).cumprod().cummax()) / ((1 + r).cumprod().cummax())).min()
    return mdd | afb637f4d79c3a10738be6723c7078003b2eae61 | 687,387
import math
def dist_to_line(line, point):
    """
    Finds a point's distance from a line of infinite length. To find a point's
    distance from a line segment, use dist_to_line_seg instead.
    line: ((lx0,ly0), (lx1,ly1)) Two points on the line
    point: (px, py) The point to find the distance from
    returns: the distance between the point and the line

    Note: raises ZeroDivisionError if the two line points coincide
    (degenerate line).
    """
    x1,y1 = line[0]
    x2,y2 = line[1]
    x3,y3 = point
    # where on line the perpendicular is
    # (u is the scalar projection parameter of the point onto the line)
    u = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))
          / (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )
    # intersection point (foot of the perpendicular)
    x = x1 + u*(x2-x1)
    y = y1 + u*(y2-y1)
    dist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))
    return dist | f5bb6468666a64790a21b027392c4b4acf530048 | 687,390
def personal_best(scores):
    """
    Return the highest score in scores.
    param: list of scores
    return: highest score in scores
    raises: ValueError if scores is empty (behaviour of built-in max)
    """
    return max(scores) | e2fe2bcb01923e1f55cc2883d3dbb3e9b2712437 | 687,393
def identify_value_block(block: dict) -> str:
    """Given a key block, find the ID of the corresponding value block.

    Takes the first relationship of type "VALUE" and returns its first Id;
    raises IndexError if the block has no VALUE relationship.
    """
    return [x for x in block["Relationships"] if x["Type"] == "VALUE"][0]["Ids"][0] | e2ce357bbd675a35e58671acd6dbaf01acfefae5 | 687,397
import logging
def mergeDoc(existing_doc, new_doc):
    """ existing_doc is merged with new_doc.
    Returns true/false if existing_doc is modified.

    NOTE(review): only 'records' is guarded against absence in new_doc;
    'images' and 'source' are accessed unguarded and raise KeyError if
    missing. Also note the key asymmetry: existing_doc accumulates under
    "sources" while new_doc is read via 'source' -- confirm this matches
    the document schema.
    """
    records = existing_doc.setdefault("records", [])
    if 'records' not in new_doc:
        return False
    isModified = False
    # Merge records, skipping duplicates already present.
    for new_record in new_doc['records']:
        if new_record not in records:
            records.append(new_record)
            isModified = True
    logging.info("# merged records %d " % len(records))
    # Merge images.
    images = existing_doc.setdefault("images", [])
    for new_image in new_doc['images']:
        if new_image not in images:
            images.append(new_image)
            isModified = True
    logging.info("# merged images %d " % len(images))
    # Merge sources.
    sources = existing_doc.setdefault("sources", [])
    for new_source in new_doc['source']:
        if new_source not in sources:
            sources.append(new_source)
            isModified = True
    logging.info("# merged sources %d " % len(sources))
    return isModified | e59317ae2bc20de1f71f901b2258b2926a7cbc0a | 687,399
import json
def parse_codacy_conf(filename, toolname="cfn-lint"):
    """Try to load codacy.json file
    If the file is missing return two empty lists, otherwise,
    if the file exist and has files and/or patterns return those.
    :param filename: name of the file to parse (typically /.codacyrc)
    :param toolname: name of the tool, used to get patterns from the codacy.json file
    :return : list of files, list of patterns to check
    """
    try:
        with open(filename) as f:
            codacyrc = json.load(f)
        # "files" is optional in the config.
        if "files" in codacyrc:
            files = codacyrc["files"]
        else:
            files = list()
        # Collect the pattern ids only for the requested tool.
        patterns = list()
        for tool in codacyrc["tools"]:
            if tool["name"] == toolname:
                patterns = [pattern["patternId"] for pattern in tool["patterns"]]
        return files, patterns
    except:
        # Deliberate best-effort: a missing, unreadable or malformed file
        # degrades to "no files, no patterns" per the docstring contract.
        return list(), list() | aedf32d51dd56b2e939a733121ef72eb9211ed54 | 687,402
def convert_mip_type_to_python_type(mip_type: str):
    """
    Converts MIP's types to the relative python class.
    The "MIP" type that this method is expecting is related to
    the "sql_type" enumerations contained in the CDEsMetadata.

    :param mip_type: one of "int", "real" or "text"
    :return: the corresponding python type (int, float or str)
    :raises KeyError: if ``mip_type`` is not a recognised MIP type
    """
    # Mapping of the CDEsMetadata "sql_type" values to python classes.
    type_mapping = {
        "int": int,
        "real": float,
        "text": str,
    }
    if mip_type not in type_mapping.keys():
        raise KeyError(
            f"MIP type '{mip_type}' cannot be converted to a python class type."
        )
    return type_mapping.get(mip_type) | 4d00909eb5dee1212ef6e45a5a7cf3a5341b36aa | 687,403
from typing import Sequence
from typing import Tuple
import math
def quaternion_to_euler(quat: Sequence[float]) -> Tuple[float,float,float]:
    """
    Convert WXYZ quaternion to XYZ euler angles, using the same method as MikuMikuDance.
    Massive thanks and credit to "Isometric" for helping me discover the transformation method used in mmd!!!!
    :param quat: 4x float, W X Y Z quaternion
    :return: 3x float, X Y Z angle in degrees
    """
    w, x, y, z = quat
    # pitch (y-axis rotation)
    sinr_cosp = 2 * ((w * y) + (x * z))
    cosr_cosp = 1 - (2 * ((x ** 2) + (y ** 2)))
    pitch = -math.atan2(sinr_cosp, cosr_cosp)
    # yaw (z-axis rotation)
    siny_cosp = 2 * ((-w * z) - (x * y))
    cosy_cosp = 1 - (2 * ((x ** 2) + (z ** 2)))
    yaw = math.atan2(siny_cosp, cosy_cosp)
    # roll (x-axis rotation)
    sinp = 2 * ((z * y) - (w * x))
    # clamp asin input: numerical error can push |sinp| slightly past 1
    if sinp >= 1.0:
        roll = -math.pi / 2  # use 90 degrees if out of range
    elif sinp <= -1.0:
        roll = math.pi / 2
    else:
        roll = -math.asin(sinp)
    # fixing the x rotation, part 1
    # (MMD-specific quadrant correction -- presumably mirrors MMD's own
    #  decomposition; do not "simplify" without checking against MMD output)
    if x ** 2 > 0.5 or w < 0:
        if x < 0:
            roll = -math.pi - roll
        else:
            roll = math.pi * math.copysign(1, w) - roll
    # fixing the x rotation, part 2: fold roll back into [-pi/2, pi/2]
    if roll > (math.pi / 2):
        roll = math.pi - roll
    elif roll < -(math.pi / 2):
        roll = -math.pi - roll
    roll = math.degrees(roll)
    pitch = math.degrees(pitch)
    yaw = math.degrees(yaw)
    return roll, pitch, yaw | 6bbce0b502f42e63b181ea65b6fff66d7294210b | 687,404
import jinja2
def load_jinja(
    path,
    file,
    vrf_name,
    bandwidth,
    packet_size,
    ref_packet_size,
    time_interval,
    ipp4_bps,
    ipp2_bw_percent,
    ipp0_bw_percent,
    interface,
):
    """Use Jinja templates to build the device configuration
    Args:
        path (`str`): Directory searched for the Jinja template
        file (`str`): Name of the template file to render
        vrf_name (`str`): Vrf name to be used in configuration
        bandwidth (`int`): In bps, bandwidth for traffic flow
        packet_size (`int`): Config packet size
        ref_packet_size (`int`): Refrenced packet size
        time_interval (`float`): In seconds, used for calculating bc
        ipp4_bps (`int`): In bps, bandwidth for IPP4 traffic
        ipp2_bw_percent (`int`): In percents, bandwidth for IPP2 traffic
        ipp0_bw_percent (`int`): In percents, bandwidth for IPP0 traffic
        interface (`str`): Where to apply the configured policies
    Returns:
        out (`str`): Rendered configuration text
    """
    # Load the template from the given directory and render it with the
    # supplied QoS parameters.
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=path))
    template = env.get_template(file)
    out = template.render(
        vrf_name=vrf_name,
        bandwidth=bandwidth,
        packet_size=packet_size,
        ref_packet_size=ref_packet_size,
        time_interval=time_interval,
        ipp4_bps=ipp4_bps,
        ipp2_bw_percent=ipp2_bw_percent,
        ipp0_bw_percent=ipp0_bw_percent,
        interface=interface,
    )
    return out | db4087efdfe1e6982ce8ce670542c41a3b4fa100 | 687,406
async def ladders(database, platform_id):
    """Get ladders for a platform.

    Returns a list of ``{'value': <id>, 'label': <name>}`` dicts.
    """
    # Parameterized query: platform_id is bound, not string-interpolated.
    query = "select id as value, name as label from ladders where platform_id=:platform_id"
    return list(map(dict, await database.fetch_all(query, values={'platform_id': platform_id}))) | 56beb60e2fc1df38a222aac4e661ac27b8b52f7a | 687,409
import hashlib
def get_unique_str(seed: str) -> str:
    """Generate md5 unique sting hash given init_string.

    Note: MD5 is used here only as a deterministic fingerprint, not for
    any security purpose.
    """
    return hashlib.md5(seed.encode("utf-8")).hexdigest() | be42bddd7f952017f3f88cbc5ccaa916a5e47872 | 687,410
from typing import List
def get_characters_from_file(file_path: str) -> List[str]:
    """
    Opens the specified file and retrieves a list of characters.
    Assuming each character is in one line.
    Characters can have special characters including a space character.
    Args:
        file_path (str): path to the file
    Returns:
        List[str]: List of character names
    """
    characters = []
    with open(file_path, 'r') as characters_file:
        characters = [
            # Remove leading/trailing spaces
            character_name.strip()
            for character_name in characters_file.readlines()
            # Skip blank lines and lines starting with '#' (comments)
            if character_name.strip() and character_name[0] != '#'
        ]
    return characters | 50fda9d3e4b2e1dd5724174549967165879dcc14 | 687,413
def _get_edge(layer_idx_start, layer_idx_end):
    """ Returns a tuple which is an edge.

    Layer indices are stringified so they can serve as graph node labels.
    """
    return (str(layer_idx_start), str(layer_idx_end)) | 3dff85ce5c328451dafbb9704d08db2a9cea0019 | 687,416
def _fasta_slice(fasta, seqid, start, stop, strand):
    """
    Return slice of fasta, given (seqid, start, stop, strand)
    """
    # '+' maps to strand 1; anything else (e.g. '-') maps to -1.
    _strand = 1 if strand == '+' else -1
    return fasta.sequence({'chr': seqid, 'start': start, 'stop': stop, \
        'strand': _strand}) | 49ab8d1ec4de8ee0f027c06dbcdaf4eb5c270c67 | 687,417
def set_coordinates(atoms, V, title="", decimals=8):
    """
    Print coordinates V with corresponding atoms to stdout in XYZ format.
    Parameters
    ----------
    atoms : list
        List of atomic types
    V : array
        (N,3) matrix of atomic coordinates
    title : string (optional)
        Title of molecule
    decimals : int (optional)
        number of decimals for the coordinates
    Return
    ------
    output : str
        Molecule in XYZ format
    """
    N, D = V.shape
    # XYZ line format: 2-char element symbol + three fixed-width floats.
    fmt = "{:2s}" + (" {:15."+str(decimals)+"f}")*3
    out = list()
    # XYZ header: atom count, then a free-form comment/title line.
    out += [str(N)]
    out += [title]
    for i in range(N):
        atom = atoms[i]
        # Capitalize the element symbol (e.g. 'cl' -> 'Cl'); assumes a
        # non-empty atom name.
        atom = atom[0].upper() + atom[1:]
        out += [fmt.format(atom, V[i, 0], V[i, 1], V[i, 2])]
    return "\n".join(out) | c818824ed98711de6fbbd9499999692debbb5307 | 687,421
from typing import Dict
def strip_leading_underscores_from_keys(d: Dict) -> Dict:
    """
    Clones a dictionary, removing leading underscores from key names.
    Raises ``ValueError`` if this causes an attribute conflict.

    Note: only a single leading underscore is stripped per key
    ('__x' becomes '_x').
    """
    newdict = {}
    for k, v in d.items():
        if k.startswith('_'):
            k = k[1:]
        # A stripped key may collide with an existing unprefixed key.
        if k in newdict:
            raise ValueError(f"Attribute conflict: _{k}, {k}")
        newdict[k] = v
    return newdict | a31e5bfe9b55c61364f166b33e4a575171feb0bf | 687,425
from typing import List
from typing import Optional
from typing import Tuple
def split_by(items: List[str],
             separator: Optional[str] = None) -> Tuple[List[str], List[str]]:
    """If the separator is present in the list, returns a 2-tuple of
    - the items before the separator,
    - all items after the separator.
    If the separator isn't present, returns a tuple of
    - (the original list, [])

    The default separator is '--' (the conventional CLI args delimiter).
    """
    if separator is None:
        separator = '--'
    # EAFP: list.index raises ValueError when the separator is absent.
    try:
        idx = items.index(separator)
        return items[0:idx], items[idx + 1:]
    except ValueError:
        return (items, []) | 947219eaed147800b54480696fb751134d5eacd8 | 687,428
def value_of_ace(hand_value):
    """
    :param hand_value: int - current hand value.
    :return: int - value of the upcoming ace card (either 1 or 11).
    """
    # Count the ace as 11 only if it does not bust the hand (go over 21).
    if hand_value + 11 > 21:
        value = 1
    else:
        value = 11
    return value | f57a2f630340b864c759bdd2f7600e44e895b599 | 687,430
def _parse_ports(ports_text):
    """
    Handle the case where the entry represents a range of ports.
    Parameters
    ----------
    ports_text: str
        The text of the given port table entry, either "N" or "N-M".
    Returns
    -------
    tuple
        A tuple of all ports the text represents (ranges are inclusive);
        an empty tuple if the text is not numeric.
    """
    ports = ports_text.split('-')
    try:
        if len(ports) == 2:
            # "N-M" range: expand to every port, end inclusive.
            ports = tuple(range(int(ports[0]), int(ports[1]) + 1))
        else:
            ports = (int(ports[0]),)
    except ValueError:
        # Non-numeric entry -- treat as no ports.
        return ()
    return ports | eb6eed9a5f8ea91d448be9c0eede9f5b258cf358 | 687,432
import socket
import struct
def long2ip(l):
    """Convert big-endian long representation of IP address to string
    """
    # "!L" packs as a network-byte-order (big-endian) unsigned 32-bit int.
    return socket.inet_ntoa(struct.pack("!L", l)) | 57c3653a3e8748a6d8461d1b89f4c3a0fa6a4d3c | 687,435
from bs4 import BeautifulSoup
def get_soup(html):
    """ Get the Beautiful Soup tree from HTML.

    Uses the stdlib "html.parser" backend so no extra parser package is
    required.
    """
    # return BeautifulSoup(req.content, "html.parser")
    # return BeautifulSoup(req.text, "html5lib") # Haven't tested this yet
    return BeautifulSoup(html, "html.parser") | 500915855594ab722cf63ecae29190e9dc907954 | 687,439
import re
def replacenth(string, sub, wanted, n):
    """Replace nth word in a sentence
    string: Complete string
    sub: Substring to be replaced
    wanted: Replace by wanted
    n: index of the occurence of sub to be replaced (1-based)

    NOTE(review): ``sub`` is passed to ``re.finditer`` and therefore
    interpreted as a regular expression -- escape it if it may contain
    regex metacharacters. Raises IndexError if there are fewer than
    ``n`` occurrences.
    """
    # Position of the nth regex match of sub.
    where = [m.start() for m in re.finditer(sub, string)][n-1]
    before = string[:where]
    after = string[where:]
    # Replace only the first (i.e. the nth overall) occurrence.
    after = after.replace(sub, wanted, 1)
    newString = before + after
    return newString | a3987b6f0c248cac6a01ae9a4a1912d88d52d630 | 687,443
def _SNR(coef):
    """
    Return the signal-to-noise ratio for each constituent.

    Amplitude squared over variance; dividing the confidence interval by
    1.96 converts a 95% CI half-width back to a standard error.
    """
    if "Lsmaj" in coef:
        # Elliptical (2-D current) case: combine major and minor axes.
        SNR = (coef["Lsmaj"] ** 2 + coef["Lsmin"] ** 2) / (
            (coef["Lsmaj_ci"] / 1.96) ** 2 + (coef["Lsmin_ci"] / 1.96) ** 2
        )
    else:
        # Scalar (1-D) case: single amplitude.
        SNR = (coef["A"] ** 2) / (coef["A_ci"] / 1.96) ** 2
    return SNR | ac3bdd752d062a72ffc64fb9171e3f636d0e26e8 | 687,444
from typing import Counter
import re
def is_valid_sentence(str):
"""
Check if a sentence is valid:
- Does not contain mismatched brackets
- Does not start with 'and' or 'but'
- Ends with a punctuation [.?!]
- Has a minimum length of 5
"""
start_pattern = r"^\"*[A-Z]+"
end_pattern = r"(\.\?\!)\"*$"
tokens = str.strip().split('\t')
if len(tokens) != 2:
return False, None, None
sid, sent = tokens
normalized = sent.replace('``', '\"')
normalized = normalized.replace('\'\'', '\"')
# filtering
# unmatched brackets
count = Counter(normalized)
if count['\"'] % 2 != 0:
valid = False
# sentences start with 'and' and 'but'
elif normalized.lower().startswith('and') or normalized.lower().startswith('but'):
valid = False
# check start and end of word
elif not re.search(start_pattern, normalized) and not re.search(end_pattern, normalized):
valid = False
elif len(normalized.split()) < 5:
valid = False
else:
valid = True
return valid, sid, sent | 192a60a5a4cc3e26d9b04cbe5a0df82a4fe72187 | 687,449 |
def prepend(base, prefix):
    """Prepend a prefix to a string.

    Both arguments are formatted with f-string interpolation, so non-str
    inputs are coerced via their __format__/__str__.
    """
    return f"{prefix}{base}" | 7d9d240a10405d7e404b46a4f0cfbc968cd3af18 | 687,451
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.