content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def sum(n):
    """
    Return the sum of the integers from 1 to ``n`` (inclusive).
    This implementation uses recursion.

    :param n: upper bound of the summation (int)
    :return: 1 + 2 + ... + n, or 0 when n < 1 (the empty sum)

    Bug fix: the original returned ``None`` implicitly for ``n < 1``;
    it now returns 0, the mathematical empty sum.
    """
    # NOTE: shadows the builtin ``sum``; name kept for interface compatibility.
    if n <= 0:
        return 0
    return n + sum(n - 1)
from typing import Optional
import re
def read_commit(version_data: str) -> Optional[str]:
    """Extract the commit hash from the contents of a version file.

    :param version_data: raw text of the version file
    :return: the commit string, or ``None`` when no ``Commit:`` line exists
    """
    pattern = re.compile('.*Commit: ([^\n\r]*)', re.DOTALL)
    found = pattern.match(version_data)
    # Greedy ``.*`` with DOTALL means the *last* "Commit: " line wins.
    return found.group(1) if found else None
def z2lin(array):
    """Convert decibel values to linear scale (np.array or single number)."""
    exponent = array / 10.
    return 10 ** exponent
def model_field_attr(model, model_field, attr):
    """Return attribute ``attr`` of the field named ``model_field`` on ``model``.

    Fields are looked up by name among ``model._meta.fields`` (Django model API).
    """
    by_name = {field.name: field for field in model._meta.fields}
    return getattr(by_name[model_field], attr)
def count_models(block):
    """Count distinct models in a structure file block.

    :param block: PDBx data block
    :type block: [str]
    :return: number of models in block
    :rtype: int

    Bug fix: the original returned the *list* of model numbers even though
    the documented contract is an int; it now returns the count.
    """
    atom_obj = block.get_object("atom_site")
    seen = []
    for i in range(atom_obj.row_count):
        num = atom_obj.get_value("pdbx_PDB_model_num", i)
        if num not in seen:
            seen.append(num)
    return len(seen)
def hex_to_bin(txt: str) -> str:
    """Convert a hexadecimal string to a binary string.

    Useful for preprocessing the key and plaintext in different settings.
    Note: leading zero bits are not preserved (``bin`` drops them).
    """
    as_int = int(txt, 16)
    return bin(as_int)[2:]
def clean_lemma(lemma: str, pos: str) -> str:
    """Normalize a token lemma.

    Trims whitespace, removes spaces/underscores, lowercases, and — for
    non-punctuation tokens — strips one leading/trailing guillemet and one
    trailing sentence-final punctuation mark.

    Args:
        lemma: Raw token lemma.
        pos: Lemma POS.
    Returns:
        Clean lemma.
    """
    cleaned = lemma.strip().replace(" ", "").replace("_", "").lower()
    if pos == "PUNCT":
        return cleaned
    if cleaned.startswith(("«", "»")):
        cleaned = cleaned[1:]
    if cleaned.endswith(("«", "»")):
        cleaned = cleaned[:-1]
    if cleaned.endswith(("!", "?", ",", ".")):
        cleaned = cleaned[:-1]
    return cleaned
def to_byte(val):
    """Encode the int ``val`` as a single little-endian byte."""
    return val.to_bytes(length=1, byteorder='little')
from typing import Tuple
import re
def replace_code(
    begin_delim: str, end_delim: str, content: str, new_code: str
) -> Tuple[str, int]:
    """Replace every span delimited by ``begin_delim``/``end_delim`` in
    ``content`` (delimiters included) with ``new_code``.

    Returns the new string and the number of replacements made.
    """
    pattern = fr"{re.escape(begin_delim)}([\s\S]*?){re.escape(end_delim)}"
    # Backslashes must be escaped once more because re treats them
    # specially in the replacement string.
    replacement = new_code.replace("\\", "\\\\")
    return re.subn(pattern, replacement, content)
import requests
def get_root_domains(url, filename):
    """Download the root-domains list and write it to a file.

    :param url: URL of the root domains list.
    :param filename: File name to write the list.
    :return: True on completion.
    """
    response = requests.get(url)
    with open(filename, 'w') as out:
        out.write(response.text)
    return True
def create_hf(geom):
    """Create GeoJSON header and footer strings for a geometry type.

    Args:
        geom (str): geometry type, e.g., polygone
    Returns:
        tuple: (header, footer) GeoJSON fragments; the coordinate list is
        expected to be inserted between them by the caller.
    Raises:
        ValueError: if ``geom`` is not an implemented geometry type.
    """
    # NOTE(review): "polygone" (with the trailing 'e') is the key callers
    # must pass — kept as-is since it is part of the runtime contract.
    if geom == "polygone":
        header = """
        {
        "type": "FeatureCollection",
        "features": [
        {
        "type": "Feature",
        "geometry": {
        "type": "Polygon",
        "coordinates": [
        """
        footer = """
        ]
        }
        }
        ]
        }"""
    else:
        raise ValueError(f"{geom} is not implemented.")
    return header, footer
import math
def string_to_array(s):
    """Convert a pipe-separated string to a list of its parts.

    ``nan`` (the float) maps to an empty list.

    :param s: a pipe-separated string, or float nan
    :return: list of parts (empty for nan)
    :raises ValueError: if ``s`` is neither a string nor nan
    """
    if isinstance(s, str):
        return s.split("|")
    if math.isnan(s):
        return []
    # Bug fix: the original message read "either string of nan".
    raise ValueError("Value must be either string or nan")
import math
import hashlib
def adventcoin_mine(salt, zeros, prob=0.99):
    """MD5-hashes salt + counter, increasing counter until hash begins with a given number of 0's in HEX,
    or until maximum value is reached

    :param salt: string to append before the counter
    :param zeros: number of zeros to search for
    :param prob: float between 0 and 1, we stop the search if we didn't find the value with this confidence interval
    :return: positive number that satisfies the condition, or 0 if the maximum value was exceeded
    """
    i = 0
    zeros_string = '0'*zeros
    # A single hash starts with `zeros` hex zeros with probability (1/16)**zeros,
    # so max_i is the trial count after which a match would have appeared with
    # confidence `prob`.  prob ~= 1 disables the cap (max_i = 0).
    if 1-prob > 1e-8:
        max_i = int(round(math.log(1-prob, 1-(1/16) ** zeros)))
    else:
        max_i = 0
    while True:
        if i > max_i > 0:  # max_i = 0 means we ignore maximum
            # We stop here
            return 0
        i += 1
        # Counter starts at 1: the first hash tried is md5(salt + "1").
        md5_hash = hashlib.md5((salt+str(i)).encode('utf8')).hexdigest()
        if md5_hash.startswith(zeros_string):
            break
    return i
def _format_cached_grains(cached_grains):
"""
Returns cached grains with fixed types, like tuples.
"""
if cached_grains.get("osrelease_info"):
osrelease_info = cached_grains["osrelease_info"]
if isinstance(osrelease_info, list):
cached_grains["osrelease_info"] = tuple(osrelease_info)
return cached_grains | b916043859288ae13ebfbe6e14daa3846baf4321 | 683,667 |
def unfold_fields(lines):
"""Unfold fields that were split over multiple lines.
Returns:
A list of strings. Each string represents one field (a name/value pair
separated by a colon).
>>> unfold_fields("foo \n bar \n baz \nbiz \nboz ")
['foo bar baz ', 'biz ', 'boz ']
"""
fields = []
for line in lines:
if line.startswith(" "):
fields[-1] += line
elif line.strip():
fields.append(line)
return fields | 6270fc9c67bc3ab37e2262e6c7d161bb4a3cc505 | 683,669 |
def analyzer(klass):
    """Instantiate the class under test with default state configuration."""
    return klass(
        start_states=["In Progress", ],
        commit_states=["Selected", "Created"],
        end_states=["Done", ],
    )
def map_symbols_to_currencies(currencies):
    """
    Build a dictionary mapping each currency symbol to its currency code.

    :param list currencies:
        List of dictionaries with data about many currencies
        (each must provide "symbol" and "cc" keys)
    :return: Dictionary with symbols and currencies
    :rtype: dict
    :raises KeyError: When given argument has wrong format
    """
    return {entry["symbol"]: entry["cc"] for entry in currencies}
import yaml
def load_configs(s: str) -> dict:
    """Parse a YAML configuration document from the string ``s``."""
    parsed = yaml.load(s, Loader=yaml.FullLoader)
    return parsed
def extract_text_body(parsed_email):
    """
    Extract email message content of type "text/plain" from a parsed email
    Parameters
    ----------
    parsed_email: email.message.Message, required
        The parsed email as returned by download_email
    Returns
    -------
    string
        string containing text/plain email body decoded with according to the Content-Transfer-Encoding header
        and then according to content charset.
    None
        No content of type "text/plain" is found.
    """
    text_content = None
    text_charset = None
    if parsed_email.is_multipart():
        # Walk over message parts of this multipart email.
        for part in parsed_email.walk():
            content_type = part.get_content_type()
            content_disposition = str(part.get_content_disposition())
            # First inline text/plain part wins; attachments are skipped.
            if content_type == 'text/plain' and 'attachment' not in content_disposition:
                text_content = part.get_payload(decode=True)
                text_charset = part.get_content_charset()
                break
    else:
        # Single-part message: the whole payload is the body.
        text_content = parsed_email.get_payload(decode=True)
        text_charset = parsed_email.get_content_charset()
    if text_content and text_charset:
        return text_content.decode(text_charset)
    # Implicit None when either the body or its charset is missing.
    return
from typing import List
def demand_satisfied(people_after: List) -> List[bool]:
    """Verify that each person gets the appropriate number of appointments.

    Scheduling covers a single week: the `1x` cohort gets one test, the `2x`
    cohort two, and the `None` cohort (incompatible people) none.  People in
    the `None` cohort must not be `assigned`; everyone else must be.

    Args:
        people_after: A roster of people with assigned schedules.
    Returns:
        A list of booleans, one per person, indicating whether that person
        has the right number of appointments.
    """
    demands = {'1x': 1, '2x': 2, None: 0}
    results = []
    for person in people_after:
        appointment_count = sum(len(day) for day in person['schedule'].values())
        cohort = person['cohort']
        if person['assigned']:
            results.append(appointment_count == demands[cohort]
                           and cohort is not None)
        else:
            results.append(appointment_count == 0 and cohort is None)
    return results
def progress_to_dict(path: str) -> dict:
    """
    Convert a Delphin ``progress.txt`` file into a dict of columns.

    :param path: path to the folder containing ``progress.txt``
    :return: dict with 'simulation_time', 'real_time' and 'percentage' lists
    """
    with open(path + '/progress.txt', 'r') as progress_file:
        lines = progress_file.readlines()
    result = {'simulation_time': [],
              'real_time': [],
              'percentage': []
              }
    # First line is a header; data rows are tab-separated triples.
    for line in lines[1:]:
        sim_time, real_time, percentage = line.split('\t')[:3]
        result['simulation_time'].append(int(sim_time.strip()))
        result['real_time'].append(float(real_time.strip()))
        result['percentage'].append(float(percentage.strip()))
    return result
import torch
def round(tensor, decimal_places):
    """
    Round a tensor's floats to the given number of decimal places.

    :param tensor: input tensor
    :type tensor: torch.Tensor
    :param decimal_places: number of decimal places
    :type decimal_places: int
    :return: rounded tensor
    :rtype: torch.Tensor
    """
    # NOTE: shadows the builtin ``round``; name kept for compatibility.
    scale = 10 ** decimal_places
    scaled = torch.round(tensor * scale)
    return scaled / scale
def split_parts(msg):
    """Split a ``key=value`` pair at the first '=' into a (key, value) tuple."""
    eq = msg.find("=")
    key, value = msg[:eq], msg[eq + 1:]
    return (key, value)
import mimetypes
def guess_extension(mime):
    """Map a MIME string to a file extension.

    ``mime`` may be None, in which case the lookup uses the empty string.
    """
    mime_type = mime or ""
    return mimetypes.guess_extension(type=mime_type)
import re
def idify(utext):
    """Make a string ID-friendly (but more unicode-friendly)."""
    slug = re.sub(r'[^\w\s-]', '', utext).strip().lower()
    slug = re.sub(r'[\s-]+', '-', slug)
    # Headers must be non-empty.
    return slug if slug else '_'
def my_func02(num01, num02):
    """
    Return the sum of the two arguments.

    :param num01: first number
    :param num02: second number
    :return: the sum of the two numbers
    """
    result = num01 + num02
    return result
def map_msa_names(df, msa_lookup):
    """Handle known MSA name changes/inconsistencies.

    :param df: A pandas dataframe, BLS OEWS data set (must have an 'area' column)
    :param msa_lookup: a dictionary containing an 'area_title' mapping of MSA code to title
    :return df: the same dataframe with its 'area_title' column remapped
    """
    titles = msa_lookup['area_title']
    df['area_title'] = df['area'].map(titles)
    return df
def legend(is_legend_show=True,
           legend_orient="horizontal",
           legend_pos="center",
           legend_top='top',
           legend_selectedmode='multiple',
           **kwargs):
    """Build the ECharts legend component options.

    The legend shows symbol, color and name of the different series, and
    clicking a legend entry toggles that series in the chart.

    :param is_legend_show: whether to show the legend component
    :param legend_orient: layout orientation, 'horizontal' or 'vertical'
    :param legend_pos: distance from the container's left side (pixel value
        like 20, percentage like '20%', or 'left'/'center'/'right')
    :param legend_top: distance from the container's top side (pixel value
        like 20, percentage like '20%', or 'top'/'middle'/'bottom')
    :param legend_selectedmode: selection mode, 'single' or 'multiple'
    :param kwargs: extra options (currently ignored)
    :return: dict of ECharts legend options
    """
    return {
        "show": is_legend_show,
        "orient": legend_orient,
        "left": legend_pos,
        "top": legend_top,
        "selectedMode": legend_selectedmode,
    }
def get_average(numbers):
    """
    Return the arithmetic mean of ``numbers``.

    Args:
        numbers (list): A list of floats.
    Returns:
        float: The average of the floats in numbers list.
    Raises:
        ZeroDivisionError: if ``numbers`` is empty.
    """
    # Idiom fix: builtin sum() replaces the manual accumulation loop.
    return sum(numbers) / len(numbers)
def set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv, list_bus_id_power_hiding_priority=None, list_bus_id_voltage_hiding_priority=None):
    """
    Return the lists of hidden power bus ids and hidden voltage bus ids.

    :param num_nodes: number of buses in the grid
    :param Ns: number of observable power measurements in the last time step
    :param Nv: number of observable voltage measurements in the last time step
    :param list_bus_id_power_hiding_priority: bus indices sorted by preferred
        hiding order (index 0 is hidden first); defaults to range(num_nodes)
    :param list_bus_id_voltage_hiding_priority: same, for voltage measurements
    :return: (hidden_power_bus_id_list, hidden_voltage_bus_id_list), each sorted
    """
    if list_bus_id_power_hiding_priority is None:
        list_bus_id_power_hiding_priority = list(range(num_nodes))
    if list_bus_id_voltage_hiding_priority is None:
        list_bus_id_voltage_hiding_priority = list(range(num_nodes))
    # The first (num_nodes - N) entries of each priority list get hidden.
    n_hidden_power = max(0, num_nodes - Ns)
    n_hidden_voltage = max(0, num_nodes - Nv)
    hidden_power_bus_id_list = sorted(list_bus_id_power_hiding_priority[:n_hidden_power])
    hidden_voltage_bus_id_list = sorted(list_bus_id_voltage_hiding_priority[:n_hidden_voltage])
    return hidden_power_bus_id_list, hidden_voltage_bus_id_list
def denormalize_m11(x):
    """Map values from [-1, 1] back to [0, 255] (inverse of normalize_m11)."""
    shifted = x + 1
    return shifted * 127.5
def test_row(dataframe):
    """
    test if dataframe contains at least one row
    Parameters
    ----------
    dataframe: pandas dataframe
    Raises
    ------
    ValueError
        If number of row is smaller than 1, raise ValueError
    Returns
    -------
    is_valid: boolean
        True if greater than 1, False if lower than 1
    """
    # NOTE(review): the ``test_`` prefix makes pytest try to collect this
    # helper as a test case — consider renaming (e.g. ``validate_has_rows``).
    is_valid = True
    if len(dataframe) < 1:
        is_valid = False
        # NOTE: because of the raise, False is never actually returned.
        raise ValueError("dataframe must has at least one row")
    return is_valid
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros] | 6517ccc434e86640141278e049a8ff62b4faa8d2 | 683,737 |
def pythonize_yang_name(name):
    """
    Convert a YANG identifier like "interface-name" to "InterfaceName".

    A name without hyphens is simply capitalized ("interface" -> "Interface").
    """
    return ''.join(part.capitalize() for part in name.split('-'))
import requests
def get_tv_episode_detail(key, id, season, episode, language="en-US"):
    """
    Get the TV episode details by id from the TMDB API.

    inputs: key - TMDB API key.
            id - id of the tv series
            season - Season of the tv series (INT)
            episode - Episode number of the series (INT)
            language - locale string, defaults to 'en-US'
    returns: status_code - HTTP status code - 200 is success, anything else
                 is considered an error
             jdata - parsed JSON with the episode data on success, else None
    """
    url = (f"https://api.themoviedb.org/3/tv/{id}/season/{season}"
           f"/episode/{episode}?api_key={key}&language={language}")
    resp = requests.get(url)
    print(f"StatusCode: {resp.status_code}")
    jdata = resp.json() if resp.status_code == 200 else None
    return resp.status_code, jdata
import torch
def _kl_divergence_q_prior_normal(mu, logvar, per_dim=False):
"""
Returns KL-divergence between the variational posterior
$q_{\phi}(z|x)$ and the isotropic Gaussian prior $p(z)$.
This forms the 'regularization' part of the ELBO.
If the variational posterior is taken to be normal with
diagonal covariance. Then:
$ D_{KL}(q_{\phi(z|x)}||p(z)) = -1/2 * \sum_j (1 + log \sigma_j^2 - \mu_j^2 - \sigma_j^2) $
"""
assert mu.shape == logvar.shape, 'Mean and log-variance must share shape (batch, latent_dim)'
batch_size, latent_dim = mu.shape
latent_kl = 0.5 * (-1 - logvar + mu.pow(2) + logvar.exp()).mean(dim=0)
total_kl = torch.sum(latent_kl)
# kl_div = -0.5 * (torch.sum(1 + logvar - mu*mu - torch.exp(logvar)))
if per_dim:
return total_kl, latent_kl
else:
return total_kl | 666e918c9ec54cc1f7d5002a811fa117083c00f8 | 683,749 |
def get_blue_green_from_app(app):
    """
    Return the app's blue_green object (if any) and its 'color' field.

    >>> get_blue_green_from_app({})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': None})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {}})
    (None, None)
    >>> get_blue_green_from_app({'blue_green': {'color': None}})
    ({'color': None}, None)
    >>> get_blue_green_from_app({'blue_green': {'color': ''}})
    ({'color': ''}, '')
    >>> get_blue_green_from_app({'blue_green': {'color': 'blue'}})
    ({'color': 'blue'}, 'blue')
    >>> get_blue_green_from_app({'blue_green': {'color': 'green'}})
    ({'color': 'green'}, 'green')
    """
    blue_green = app.get('blue_green')
    if not blue_green:
        return None, None
    return blue_green, blue_green.get('color', None)
def get_last_conv_layer_name(model_keras):
    """
    Return the name of the last layer with a 4D output shape — i.e. the
    convolutional layer closest to the end of the network.

    Args:
        model_keras: A keras model object
    Returns:
        Name of the layer (str)
    Raises:
        ValueError: if no layer with a 4D output shape exists.
    """
    # Walk the layers back-to-front and take the first 4D one.
    for layer in reversed(model_keras.layers):
        if len(layer.output_shape) == 4:
            return layer.name
    raise ValueError("Could not find a convolutional layer (layer with 4D).")
def flatten_args(args):
    """
    Given a dictionary of arguments, produce a string suitable for inclusion
    in a command line, such as "--name1 value1 --name2 value2".

    Bug fix: ``dict.iteritems`` is Python 2 only; use ``items`` instead.
    """
    return " ".join("%s %s" % (name, value)
                    for name, value in args.items())
def _decimal_lshift_exact(n, e):
    """ Given integers n and e, return n * 10**e if it's an integer, else None.
    The computation is designed to avoid computing large powers of 10
    unnecessarily.
    >>> _decimal_lshift_exact(3, 4)
    30000
    >>> _decimal_lshift_exact(300, -999999999) # returns None
    """
    if n == 0:
        return 0
    elif e >= 0:
        # A left shift by a non-negative power of ten is always exact.
        return n * 10 ** e
    else:
        # val_n = largest j such that 10**j divides n (count of trailing
        # zeros); the right shift is exact only if n has at least -e of them.
        str_n = str(abs(n))
        val_n = len(str_n) - len(str_n.rstrip('0'))
        return None if val_n < -e else n // 10 ** -e
def calculate_polynomial_derivative_term(coefficient, variable, order):
    """Calculates the derivative of the nth order term of a polynomial.

    Args:
        coefficient (float): The coefficient of the nth order term in the
            polynomial
        variable (float): float to plug in for the variable in the polynomial
        order (int): order of the nth order term in the polynomial (so, n.)

    Returns:
        float: order * coefficient * variable**(order - 1).
        The zeroth-order edge case is handled naturally, since the result is
        multiplied by the order (which is zero in the n = 0 case).

    Raises:
        TypeError: A non-integer was passed as the order.
    """
    # Idiom fix: isinstance replaces the exact type() comparison (and also
    # accepts int subclasses).
    if not isinstance(order, int):
        raise TypeError('Non-integer order in polynomial term')
    return order * coefficient * variable**(order - 1)
def single_line(line, report_errors=True, joiner='+'):
    """Collapse ``line`` into a single line joined with ``joiner``.

    When ``report_errors`` is set and more than one line was present, a
    warning is printed.
    """
    pieces = line.strip().splitlines()
    if report_errors and len(pieces) > 1:
        print('multiline result:', pieces)
    return joiner.join(pieces)
import unittest
def combined_suites(*test_suites):
"""Combines several suites into one"""
combined_suite = unittest.TestSuite(test_suites)
return combined_suite | 9016cc680dde2ef1d44280cb0bd469b78fec4db6 | 683,772 |
import unicodedata
def strip_diacritics_2(input_string: str) -> str:
    """Return a copy of `input_string` without diacritics, such that
    strip_diacritics('skříň') == 'skrin'
    """
    # Map every non-ASCII char to the base char of its canonical
    # decomposition (e.g. 'ř' -> 'r').
    translation = {}
    for char in input_string:
        if ord(char) > 0x7f:
            base_hex = unicodedata.decomposition(char).split()[0]
            translation[char] = int(base_hex, base=16)
    return input_string.translate(str.maketrans(translation))
def liquidViscosity(T, lVP):
    """
    Liquid viscosity (centipoise) at temperature T:

        viscosity = 10^(A + B/T + C*T + D*T^2)

    Parameters
        T: temperature in K
        lVP: regression coefficients, A=lVP[0], B=lVP[1], C=lVP[2], D=lVP[3]
    Returns
        liquid viscosity in centipoise at T
    """
    A, B, C, D = lVP[0], lVP[1], lVP[2], lVP[3]
    exponent = A + B / T + C * T + D * T ** 2
    return 10 ** exponent
def valid_xml_char_ordinal(c):
    """Return True if ``c`` is a character allowed by the XML standard.

    The standard defines a valid character as:
    Char ::= #x9 | #xA | #xD | [#x20 - #xD7FF] |
             [#xE000 - #xFFFD] | [#x10000 - #x10FFFF]

    Args:
        c: Character to be checked
    Returns:
        true if character codepoint in valid range
    """
    cp = ord(c)
    # Conditions ordered by presumed frequency.
    if 0x20 <= cp <= 0xD7FF:
        return True
    if cp in (0x9, 0xA, 0xD):
        return True
    return 0xE000 <= cp <= 0xFFFD or 0x10000 <= cp <= 0x10FFFF
def add_vp_vs(df):
    """Compute Vp and Vs from the sonic logs of a LAS dataframe.

    Args:
        df (pandas.DataFrame): input dataframe, MUST CONTAIN `DTCO` and `DTSM`
    Returns:
        pandas.DataFrame: input dataframe with Vp, Vs and Vp_max added
    """
    # Invert the slowness logs and convert feet to metres (3.281 ft/m).
    feet_per_metre = 3.281
    df['Vp'] = 1000000 / df['DTCO'] / feet_per_metre
    df['Vs'] = 1000000 / df['DTSM'] / feet_per_metre
    df['Vp_max'] = df['Vp'].max() + 200
    return df
def calculate_simpson_index(set1, set2):
    """Simpson index of two sets: |A ∩ B| / min(|A|, |B|)."""
    overlap = float(len(set1.intersection(set2)))
    smaller = min(float(len(set1)), float(len(set2)))
    return overlap / smaller
def return_label(file_path):
    """
    Extract the target label from a filename.

    Inputs:
    ---------------
    file_path: Source of raw wav signal (str)
    Outputs:
    ---------------
    y: target as string ('silence', 'singing' or 'speaking')
    """
    lowered = file_path.lower()
    # 'silence' takes precedence over 'song'; anything else is speech.
    if "silence" in lowered:
        return 'silence'
    if "song" in lowered:
        return 'singing'
    return 'speaking'
from typing import List
def get_sum_of_elements(lst: List) -> int:
    """Return the sum of all elements in ``lst``."""
    total = sum(lst)
    return total
from typing import Optional
def get_location_id(data: dict) -> Optional[str]:
    """
    Extract location_id from a data dictionary, defaulting to None.

    :param dict data: The event data
    :return str|None: A string containing the location id, or None
    """
    try:
        new_record = data["event"]["data"]["new"]
        return new_record["location_id"]
    except (TypeError, KeyError):
        return None
import re
def get_cell_barcode(record, cell_barcode_pattern):
    """Return the cell barcode found in the record name, or None.

    Parameters
    ----------
    record : screed record
        record whose 'name' field may contain the barcode
    cell_barcode_pattern : regex pattern
        cell barcode pattern (with groups; group 2 holds the barcode)

    Returns
    -------
    barcode : str or None
        The barcode from the name, or None when no match exists.
    """
    matches = re.findall(cell_barcode_pattern, record['name'])
    return matches[0][1] if matches else None
from typing import List
def spooler_pids() -> List[int]:
    """Return a list of all spooler process IDs (currently always empty)."""
    pids: List[int] = []
    return pids
import base64
def encode_file_to_base64(fpath_in, prefix):
    """Return ``prefix`` plus the base64 encoding of a file's contents.

    Args:
        fpath_in (str): path to file to encode
        prefix (str): data-URI prefix (e.g. 'data:image/png;base64,')
    Returns: base64 encoding of file, prefixed
    """
    with open(fpath_in, 'rb') as file_obj:
        encoded = base64.b64encode(file_obj.read()).decode('utf-8')
    return prefix + encoded
def le_bytes_to_int(as_bytes: bytes, signed: bool) -> int:
    """Decode a little-endian byte array into an integer.

    :param as_bytes: A little endian encoded byte array integer.
    :param signed: Flag indicating whether integer is signed.
    """
    return int.from_bytes(as_bytes, byteorder='little', signed=signed)
import string
def base26(x, _alphabet=string.ascii_uppercase):
    """Return positive ``int`` ``x`` as string in bijective base26 notation.
    >>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
    ['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']
    >>> base26(344799) # 19 * 26**3 + 16 * 26**2 + 1 * 26**1 + 13 * 26**0
    'SPAM'
    >>> base26(256)
    'IV'
    """
    letters = []
    while x:
        x, remainder = divmod(x, 26)
        if not remainder:
            # Bijective numeration has no zero digit: borrow from x.
            x -= 1
            remainder = 26
        letters.append(_alphabet[remainder - 1])
    letters.reverse()
    return ''.join(letters)
def pgcd(a, b):
    """Return the greatest common divisor of the integers ``a`` and ``b``.

    Arguments:
        a (int): an integer
        b (int): an integer

    Raises:
        ZeroDivisionError: when both ``a`` and ``b`` are zero.
    """
    a, b = abs(a), abs(b)
    if a == 0 and b == 0:
        raise ZeroDivisionError(
            "Le PGCD de deux nombres nuls n'existe pas")
    # Iterative Euclid (equivalent to the recursive formulation).
    while b:
        a, b = b, a % b
    return a
from typing import Dict
from typing import Tuple
from typing import Set
def prepare_senses_index_for_search(senses_dict: Dict[str, Dict[str, Tuple[tuple, Tuple[int, int]]]]) -> \
        Dict[str, Set[str]]:
    """Build a word -> sense-IDs index for fast sentence-candidate selection.

    Matching every RuWordNet term against every sentence exhaustively is
    O(n) in the number of terms.  Instead, each alphanumeric word of a
    term's main part is indexed in a hash table, so candidate senses for a
    sentence can be selected in O(1) per word; only those candidates then
    need the full linear check.

    :param senses_dict: a dictionary with inflected terms
        (see `ruwordnet_parsing.load_and_inflect_senses`).
    :return: the created search index.
    """
    index: Dict[str, Set[str]] = {}
    for sense_id, by_morpho_tag in senses_dict.items():
        for tokens, (main_start, main_end) in by_morpho_tag.values():
            main_tokens = tokens[main_start:main_end]
            for token in filter(str.isalnum, main_tokens):
                index.setdefault(token, set()).add(sense_id)
    return index
from typing import Sequence
def to_sequence(obj):
    """Wrap ``obj`` in a sequence unless it already is one.

    Parameters
    ----------
    obj : `object`

    Returns
    -------
    `collections.Sequence`

    Examples
    --------
    >>> to_sequence(None)
    ()
    >>> to_sequence(1)
    (1,)
    >>> to_sequence('str')
    ('str',)
    >>> x = [0, 1, 2]
    >>> to_sequence(x)
    [0, 1, 2]
    >>> to_sequence(x) is x
    True
    """
    if obj is None:
        return ()
    # Strings are sequences but are treated as scalars here.
    if isinstance(obj, Sequence) and not isinstance(obj, str):
        return obj
    return (obj,)
def select(t, *columns):
    """Select the given columns from table ``t``.

    >>> t = Symbol('t', 'var * {x: int, y: int, z: int}')
    >>> select(t, t.x, t.z)
    t[['x', 'z']]
    """
    names = [column._name for column in columns]
    return t[names]
from typing import Dict
import pickle
def _from_checkpoint(
fname: str='checkpoint.pkl') -> Dict:
""" Load a checkpoint file """
with open(fname, 'rb') as f:
checkpoint = pickle.load(f)
return checkpoint | 66674dde389936e45c3d8935bcd8174c4844f5cc | 683,822 |
from typing import List
from typing import Dict
from typing import Any
def _transform_dto_list_to_list_of_dicts(dto_list) -> List[Dict[str, Any]]:
"""
Given a list of DTO objects, this function returns a list of dicts, that can be passed to jsonify function.
"""
return [vars(dto_obj) for dto_obj in dto_list] | 41d4d587aa78cf1e3879c22c2d95e28f9e4b0507 | 683,823 |
def join_rows(rows, joiner=' '):
    """
    Merge a series of rows into a single row, gluing the inner edge cells.

    The last cell of each row is joined (with ``joiner``) to the first cell
    of the next row; the remaining cells are appended as-is.  By default
    joins with a single space; pass new-line, empty string, or anything
    else via ``joiner``.
    """
    rows = list(rows)
    merged = rows[0][:]
    for row in rows[1:]:
        cells = row if row else ['']
        merged[-1] += "%s%s" % (joiner, cells[0])
        merged.extend(cells[1:])
    return merged
def parse_float(val):
    """Parse a string as float, treating '--' as 0."""
    return 0 if val == '--' else float(val)
def running_mean(l, N):
    """Running mean of the values in ``l`` with a window of ``N`` items.

    The first N-1 entries are cumulative means over the values seen so far;
    from index N-1 on, each entry is the mean of the previous N values.
    The larger N is, the smoother the result.
    """
    window_sum = 0
    result = [0 for _ in l]
    # Warm-up: cumulative mean until the window is full.
    for i in range(0, N):
        window_sum += l[i]
        result[i] = window_sum / (i + 1)
    # Slide the window: drop the oldest value, add the newest.
    for i in range(N, len(l)):
        window_sum += l[i] - l[i - N]
        result[i] = window_sum / N
    return result
def get_outbreaks(flowmat, incidence, R0=2.5, asymf=10, attenuate=1.0):
    """
    Calculate the outbreak probability for every region.

    :param flowmat: Arriving passengers, row -> column
    :param incidence: fraction of infectious in the populations
    :param R0: Basic reproduction number
    :param asymf: how many asymptomatics per reported case
    :param attenuate: Attenuation factor for flow
    :return: per-region outbreak probabilities
    """
    # Infectious arrivals into each region, weighted by origin incidence.
    inflows = (flowmat.T * attenuate) @ incidence
    # Each arriving case sparks an outbreak with probability 1 - 1/R0.
    return 1 - (1 / R0) ** (inflows * 8 * asymf)
def bin_to_hex(x):
    """Convert a binary string to hex, zero-padded to 8 hex digits."""
    digits = hex(int(x, 2))[2:]
    return digits.zfill(8)
import re
def process_tweets(text):
    """Remove mentions, urls, and HTML reference characters from a string."""
    # Strip @mentions and https:// urls first, then entity refs like &amp;.
    without_mentions_urls = re.sub("(\@|https:\/\/)\S+", "", text)
    return re.sub(r"&[a-z]+;", "", without_mentions_urls)
def f90bool(s):
    """Convert the string repr of a Fortran logical to a Python bool.

    Accepts forms like 'T', 'F', '.true.', '.false.' (case-insensitive).

    :raises ValueError: if *s* is not a valid Fortran logical constant.
    """
    # isinstance is the idiomatic type check (was `type(s) == str`).
    assert isinstance(s, str)
    try:
        # The significant character is the one after a leading '.', if any.
        s_bool = s[1].lower() if s.startswith('.') else s[0].lower()
    except IndexError:
        raise ValueError('{0} is not a valid logical constant.'.format(s))
    if s_bool == 't':
        return True
    elif s_bool == 'f':
        return False
    else:
        raise ValueError('{0} is not a valid logical constant.'.format(s))
from functools import cmp_to_key
def argsort(mylist, comp=None):
    """Returns the indices that sort a list.
    Parameters
    ----------
    mylist : list of objects
        List to sort.
    comp : function, optional
        A comparison function used
        to compare two objects in the list.
        Defaults to None.
    Returns
    -------
    list of int
        The permutation that sorts the list.
    """
    # Based on https://stackoverflow.com/questions/3382352/equivalent-of-numpy-argsort-in-basic-python
    if comp is None:
        return sorted(range(len(mylist)), key=mylist.__getitem__)
    # Bug fix: `comp` must compare list *elements* (as documented), not the
    # raw indices -- previously cmp_to_key(comp) was applied to the indices.
    return sorted(range(len(mylist)),
                  key=cmp_to_key(lambda i, j: comp(mylist[i], mylist[j])))
from typing import Optional
import torch
def file2ckpt(path: str, device: Optional[str] = None) -> dict:
    """
    Load a ckpt file into a dictionary to restart a past simulation.
    Thin wrapper around :func:`torch.load`.

    Args:
        path: location of the ckpt file (required)
        device: either "cuda" or "cpu" to force the device of the loaded
            tensors (optional). Useful when a model trained on a GPU machine
            needs to be loaded on a CPU machine, or vice versa.

    Examples:
        >>> ckpt = file2ckpt(path="pretrained_GPU_model.pt", device="cpu")
        >>> vae = CompositionalVae(params=ckpt.get("params"))
    """
    if device is None:
        return torch.load(path)
    if device == 'cuda':
        return torch.load(path, map_location="cuda:0")
    if device == 'cpu':
        return torch.load(path, map_location=torch.device('cpu'))
    raise Exception("device is not recognized")
from typing import Sequence
from typing import Sized
def are_none(sequences: Sequence[Sized]) -> bool:
    """
    Report whether every entry in *sequences* is None.
    Vacuously True for an empty (or falsy) input.
    """
    return not sequences or all(item is None for item in sequences)
def concat(str_one, str_two):
    """
    Concatenate two strings. A string with a null (falsy) value is
    treated as the empty string.
    """
    return (str_one or "") + (str_two or "")
from shutil import which
def is_tool(name):
    """Return True when `name` resolves to an executable on PATH."""
    resolved = which(name)
    return resolved is not None
import functools
def cached(func):
    """Decorator that memoizes `func` results, keyed on its arguments.

    The cache dict is exposed as ``func.cache``.
    """
    func.cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Keyword args make the key a (args, frozenset-of-items) pair.
        key = (args, frozenset(kwargs.items())) if kwargs else args
        try:
            return func.cache[key]
        except KeyError:
            result = func.cache[key] = func(*args, **kwargs)
            return result
    return wrapper
import re
from datetime import datetime
def _get_iso_date(date_string: str) -> str:
""" convert date from the form 1/22/2021 13:28:27 to iso format """
regex = r'\d{1,2}/\d{1,2}/\d{4} \d{1,2}:\d{1,2}:\d{1,2}'
found_list = re.findall(regex, date_string)
if found_list:
date_value = datetime.strptime(date_string, '%m/%d/%Y %H:%M:%S')
return date_value.isoformat()
return date_string | fa9d6c733014ad4d738d63f2399085f08949c108 | 683,851 |
def remove_erroneous_blocks(blocks, delta_time=2.0, n_blocks=3):
    """ Remove sessions with erroneous data due to a NeuroPsy Research App malfunction.
    The error duplicates block data (with df1 & df2 multiplied again by 100). Duplicated
    blocks are detected by their time stamps being closer than `delta_time` to the
    previous block. Sessions left without `n_blocks` blocks are removed entirely.
    NeuroPsyResearchApp issue #1.
    :param pandas.DataFrame blocks: Data about blocks.
    :param float delta_time: Threshold in seconds below which a consecutive block in a
        session is considered a duplicate. Default is 2.0 seconds.
    :param int n_blocks: Required number of blocks per session.
    :returns: Cleaned block data. Number of errors found. List of removed sessions.
    :rtype: tuple[pandas.DataFrame, int, list]
    """
    # Duplicated blocks show up as consecutive time stamps < delta_time apart.
    too_close = blocks.groupby(['session_uid'])['time'].diff() < delta_time
    n_errors = int(too_close.sum())
    blocks = blocks.loc[~too_close, :]
    # A session may now be missing blocks; drop any session whose block count
    # differs from the expected n_blocks.
    counts = blocks['session_uid'].value_counts()
    invalid_sessions = counts.index[counts != n_blocks].to_list()
    blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :]
    return blocks, n_errors, invalid_sessions
def get_sitemap(app, excludes=("/", "/static/<path:filename>")):
    """Returns a sitemap for the given application.
    Args:
        app (flask.Flask): Application to be scanned.
        excludes (tuple): Tuple of endpoint rules to be hidden.
    Returns:
        list: A list of dicts, sorted by url, each containing a rule url and a
        comma-joined *string* of its methods. Example:
            [
                {"url": "/", "methods": "GET,HEAD,OPTIONS"},
                {"url": "/username", "methods": "GET,POST"}
            ]
    """
    # Note: the previous docstring showed `methods` as a list, but the code
    # has always emitted a comma-joined string.
    endpoints = []
    for rule in app.url_map.iter_rules():
        if str(rule) in excludes:
            continue
        endpoints.append({"url": str(rule), "methods": ",".join(rule.methods)})
    endpoints.sort(key=lambda endpoint: endpoint["url"])
    return endpoints
def estimate_infectious_rate_constant(events,
                                      t_start,
                                      t_end,
                                      kernel_integral,
                                      count_events=None):
    """
    Estimate the (constant) infectious rate for the given events on the
    interval [t_start, t_end].
    :param events: iterable of (event_time, follower_cnt) tuples
    :param t_start: time interval start
    :param t_end: time interval end
    :param kernel_integral: integral function of the kernel
    :param count_events: observed event count in the interval (used by the
        time-window approach); defaults to len(events)
    :return: estimated infectious rate
    """
    # Total exposure contributed by each event, weighted by followers.
    total_exposure = sum(
        followers * kernel_integral(t_start - when, t_end - when)
        for when, followers in events
    )
    observed = len(events) if count_events is None else count_events
    return observed / total_exposure
def survey_media(instance, filename):
    """Build the upload path for survey media, saving the survey first if it
    has no id yet (so the path can include a real id)."""
    survey = instance.survey
    if not survey.id:
        survey.save()
    return 'survey/{0}/{1}'.format(survey.id, filename)
import requests
def check_main_service_healthcheck(SVC_URL):
    """Return True if the main service health endpoint answers HTTP 200, else False."""
    try:
        response = requests.get(SVC_URL + '/NexTrip')
        return response.status_code == 200
    except Exception:
        return False
from textwrap import dedent
import inspect
def get_func_code(f):
    """Return the source of function ``f`` with common leading indentation removed."""
    source = inspect.getsource(f)
    return dedent(source)
def mmedian(lst):
    """Return the median of ``lst``, or 0.0 for an empty list."""
    if not lst:
        return 0.0
    ordered = sorted(lst)
    mid = (len(ordered) - 1) // 2
    if len(ordered) % 2:
        return ordered[mid]
    # Even length: average the two middle values.
    return (ordered[mid] + ordered[mid + 1]) / 2.0
def _format_spreadsheet_headers(token):
"""
Return formatted authorization headers for further
interactions with spreadsheet api.
"""
return {
"Authorization": f"Bearer {token}"
} | 5dc8cd880c598246b7c47ccbc0138daac03c3131 | 683,867 |
import re
def getFilename(name):
    """Sanitize *name* into a filename free of dangerous or incompatible
    characters."""
    # Replace every character outside the safe set with an underscore.
    safe = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
    # Collapse runs of dots and underscores to single characters.
    while ".." in safe:
        safe = safe.replace("..", ".")
    while "__" in safe:
        safe = safe.replace("__", "_")
    # Drop a single leading dot or minus.
    if safe[:1] in (".", "-"):
        safe = safe[1:]
    return safe
from pathlib import Path
def _get_old_file(new_file: Path) -> Path:
"""Return the same file without the .new suffix"""
assert new_file.name.endswith('.new') # noqa
return new_file.with_name(new_file.stem) | ac0738099d088aa1af184b8b37bf3423180b7f24 | 683,872 |
def xor_fixed_buffers(buf1, buf2):
    """
    XOR two hex-encoded strings byte by byte.
    :param buf1: hex encoded string
    :param buf2: hex encoded string (at least as long as buf1)
    :return: hex encoded string of the XOR result
    """
    left = bytearray.fromhex(buf1)
    right = bytearray.fromhex(buf2)
    # Iterate over buf1's length (an IndexError signals a too-short buf2).
    xored = bytes(left[i] ^ right[i] for i in range(len(left)))
    return xored.hex()
def previous(some_list, current_index):
    """
    Returns the previous element of the list using the current
    index if it exists. Otherwise returns an empty string.
    """
    try:
        prev_index = int(current_index) - 1
    except (TypeError, ValueError):
        return ''
    # Bug fix: for current_index == 0 the old code computed index -1, which
    # wrapped around and returned the *last* element instead of ''.
    if prev_index < 0:
        return ''
    try:
        return some_list[prev_index]
    except (IndexError, TypeError):
        return ''
def get_truck(client, truck_id):
    """
    Fetch the specified truck through the test client.
    :param client: The test client to make the request with
    :param truck_id: The id of the truck to find
    :return: response for the truck with id=truck_id
    """
    url = f'/api/trucks/{truck_id}'
    return client.get(url)
def ros_service_response_cmd(service, result, _id=None, values=None):
    """
    Build a rosbridge "service_response" command object -- the reply to a
    ROS service call.
    :param service: name of the service that was called
    :param result: boolean return value of the service callback
        (True means success, False failure)
    :param _id: echoed back when the call_service request carried an ID
    :param values: dict of return values; omitted when the service has none
    """
    response = {
        "op": "service_response",
        "service": service,
        "result": result,
    }
    # Optional fields are only attached when truthy, mirroring rosbridge.
    for key, value in (("id", _id), ("values", values)):
        if value:
            response[key] = value
    return response
def escapeAttrJavaScriptStringDQ(sText):
    """ Escapes a javascript string that is to be emitted between double quotes. """
    # Fast path: nothing to escape (no quotes, no control characters).
    # Guard against the empty string, for which min() would raise ValueError.
    if sText and '"' not in sText and ord(min(sText)) >= 0x20:
        return sText
    sRet = ''
    for ch in sText:
        if ch == '"':
            sRet += '\\"'
        elif ord(ch) >= 0x20:
            sRet += ch
        elif ch == '\n':
            sRet += '\\n'
        elif ch == '\r':
            sRet += '\\r'
        elif ch == '\t':
            sRet += '\\t'
        else:
            # Bug fix: %x needs the code point, not the character itself
            # (formatting a str with %02x raised TypeError).
            sRet += '\\x%02x' % (ord(ch),)
    return sRet
def center_scale_to_corners(yx, hw):
    """Convert bounding boxes from "center+scale" form to "corners" form
    (top-left point, bottom-right point)."""
    half = 0.5 * hw
    top_left = yx - half
    bottom_right = yx + half
    return top_left, bottom_right
from typing import Union
def get_error_message(traceback: str) -> Union[str, None]:
    """Extracts the error message (last line) from the traceback.
    If no error message is found, will return None.
    Here's an example:
    input:
        Traceback (most recent call last):
        File "example_code.py", line 2, in <module>
            import kivy
        ModuleNotFoundError: No module named 'kivy'
    output:
        ModuleNotFoundError: No module named 'kivy'
    """
    error_lines = traceback.splitlines()
    # Bug fix: an empty traceback raised IndexError instead of returning
    # None as the docstring promises.
    if not error_lines:
        return None
    return error_lines[-1]
def read(filepath, readfunc, treant):
    """Read data from a treant
    Args:
      filepath: the filepath to read from
      readfunc: the read callback
      treant: the treant to read from
    Returns:
      the data
    """
    leaf = treant[filepath]
    return readfunc(leaf.abspath)
import re
def is_project_issue(text):
    """
    Detect issue/pull-request ids of Apache projects in Jira,
    e.g. 'thrift-3615'. Common encoding names ('utf-8', 'latin-1',
    'iso-8858') are deliberately rejected.
    See: https://issues.apache.org/jira/secure/BrowseProjects.jspa#all
    >>> is_project_issue('thrift-3615')
    True
    >>> is_project_issue('sling-5511')
    True
    >>> is_project_issue('sling')
    False
    >>> is_project_issue('project-8.1')
    False
    >>> is_project_issue('utf-8')
    False
    """
    issue_pattern = r'''
        (?!utf-)  # Some special cases...
        (?!latin-)
        (?!iso-)
        \w+-\d+$
    '''
    return re.match(issue_pattern, text, re.VERBOSE | re.UNICODE) is not None
def read_input(fpath):
    """
    Read the global input file.
    Args:
        fpath (str): Path to the input file to read.
    Returns:
        list: the stripped lines of the file.
    """
    with open(fpath, 'r') as handle:
        return [line.strip() for line in handle]
def _cell_fracs_sort_vol_frac_reverse(cell_fracs):
"""
Sort cell_fracs according to the order of increasing idx and decreasing
with vol_frac.
Parameters
----------
cell_fracs : structured array
The output from dagmc.discretize_geom(). A sorted, one dimensional
array, each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
The volume fraction of the cell withing the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
The array must be sorted with respect to both idx and cell, with
cell changing fastest.
Returns
-------
cell_fracs : structured array
Sorted cell_fracs.
"""
# sort ascending along idx and vol_frac
# ndarray.sort can't sort using desending sequence.
# Multiply the vol_frac to -1.0 to sort the vol_frac in reverse order.
cell_fracs['vol_frac'] *= -1.0
cell_fracs.sort(order=['idx', 'vol_frac'])
cell_fracs['vol_frac'] *= -1.0
return cell_fracs | 74ab5c5cee05e83612dace0c4e1ec4ea80ca4858 | 683,905 |
import math
def fromSpherical(r, theta, phi):
    """Convert spherical coordinates (r, theta, phi) to 3-d cartesian (x, y, z)."""
    sin_theta = math.sin(theta)
    x = r * sin_theta * math.cos(phi)
    y = r * sin_theta * math.sin(phi)
    z = r * math.cos(theta)
    return x, y, z
import torch
def cross_product_matrix(v):
    """Build the skew-symmetric cross-product matrix of each vector.
    Args:
        v: tensor of shape `[...,3]`
    Returns:
        The skew symmetric form, shape `[...,3,3]`
    """
    x, y, z = v[..., 0], v[..., 1], v[..., 2]
    zeros = torch.zeros_like(x)
    # Row-major entries of [[0,-z,y],[z,0,-x],[-y,x,0]].
    entries = [zeros, -z, y,
               z, zeros, -x,
               -y, x, zeros]
    return torch.stack(entries, dim=-1).view(list(x.shape) + [3, 3])
def is_generator(iterable):
    """
    Check if an iterable is a generator -- i.e. an iterable that
    exposes no len().
    Args:
        iterable: Iterable.
    Returns:
        boolean
    """
    has_iter = hasattr(iterable, '__iter__')
    has_len = hasattr(iterable, '__len__')
    return has_iter and not has_len
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.